From 748308b389b4ff717265e6aa7fc89b470403b6b8 Mon Sep 17 00:00:00 2001 From: "ci.datadog-api-spec" Date: Thu, 9 Apr 2026 21:37:34 +0000 Subject: [PATCH] Regenerate client from commit b942564 of spec repo --- config/_default/menus/api.en.yaml | 238 + content/en/api/latest/org-groups/_index.md | 3 + .../v2/observability-pipelines/examples.json | 14 +- content/en/api/v2/org-groups/_index.md | 4 + content/en/api/v2/org-groups/examples.json | 2161 +++++++ data/api/v2/full_spec.yaml | 5729 ++++++++++++----- data/api/v2/translate_actions.json | 88 + data/api/v2/translate_tags.json | 4 + 8 files changed, 6517 insertions(+), 1724 deletions(-) create mode 100644 content/en/api/latest/org-groups/_index.md create mode 100644 content/en/api/v2/org-groups/_index.md create mode 100644 content/en/api/v2/org-groups/examples.json diff --git a/config/_default/menus/api.en.yaml b/config/_default/menus/api.en.yaml index 7f555c8337a..adab6a83795 100644 --- a/config/_default/menus/api.en.yaml +++ b/config/_default/menus/api.en.yaml @@ -11912,6 +11912,244 @@ menu: - ListOrgConnections unstable: [] order: 100 + - name: Org Groups + url: /api/latest/org-groups/ + identifier: org-groups + generated: true + - name: Update an org group + url: '#update-an-org-group' + identifier: org-groups-update-an-org-group + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - UpdateOrgGroup + unstable: + - v2 + order: 4 + - name: Get an org group + url: '#get-an-org-group' + identifier: org-groups-get-an-org-group + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - GetOrgGroup + unstable: + - v2 + order: 2 + - name: Delete an org group + url: '#delete-an-org-group' + identifier: org-groups-delete-an-org-group + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - DeleteOrgGroup + unstable: + - v2 + order: 5 + - name: Create an org group + url: '#create-an-org-group' + identifier: 
org-groups-create-an-org-group + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - CreateOrgGroup + unstable: + - v2 + order: 3 + - name: List org groups + url: '#list-org-groups' + identifier: org-groups-list-org-groups + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - ListOrgGroups + unstable: + - v2 + order: 1 + - name: Update an org group policy override + url: '#update-an-org-group-policy-override' + identifier: org-groups-update-an-org-group-policy-override + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - UpdateOrgGroupPolicyOverride + unstable: + - v2 + order: 16 + - name: Delete an org group policy override + url: '#delete-an-org-group-policy-override' + identifier: org-groups-delete-an-org-group-policy-override + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - DeleteOrgGroupPolicyOverride + unstable: + - v2 + order: 17 + - name: Create an org group policy override + url: '#create-an-org-group-policy-override' + identifier: org-groups-create-an-org-group-policy-override + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - CreateOrgGroupPolicyOverride + unstable: + - v2 + order: 15 + - name: List org group policy overrides + url: '#list-org-group-policy-overrides' + identifier: org-groups-list-org-group-policy-overrides + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - ListOrgGroupPolicyOverrides + unstable: + - v2 + order: 14 + - name: List org group policy configs + url: '#list-org-group-policy-configs' + identifier: org-groups-list-org-group-policy-configs + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - ListOrgGroupPolicyConfigs + unstable: + - v2 + order: 18 + - name: Update an org group policy + url: '#update-an-org-group-policy' + identifier: org-groups-update-an-org-group-policy + parent: 
org-groups + generated: true + params: + versions: + - v2 + operationids: + - UpdateOrgGroupPolicy + unstable: + - v2 + order: 12 + - name: Delete an org group policy + url: '#delete-an-org-group-policy' + identifier: org-groups-delete-an-org-group-policy + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - DeleteOrgGroupPolicy + unstable: + - v2 + order: 13 + - name: Create an org group policy + url: '#create-an-org-group-policy' + identifier: org-groups-create-an-org-group-policy + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - CreateOrgGroupPolicy + unstable: + - v2 + order: 11 + - name: List org group policies + url: '#list-org-group-policies' + identifier: org-groups-list-org-group-policies + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - ListOrgGroupPolicies + unstable: + - v2 + order: 10 + - name: Update an org group membership + url: '#update-an-org-group-membership' + identifier: org-groups-update-an-org-group-membership + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - UpdateOrgGroupMembership + unstable: + - v2 + order: 8 + - name: Get an org group membership + url: '#get-an-org-group-membership' + identifier: org-groups-get-an-org-group-membership + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - GetOrgGroupMembership + unstable: + - v2 + order: 7 + - name: Bulk update org group memberships + url: '#bulk-update-org-group-memberships' + identifier: org-groups-bulk-update-org-group-memberships + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - BulkUpdateOrgGroupMemberships + unstable: + - v2 + order: 9 + - name: List org group memberships + url: '#list-org-group-memberships' + identifier: org-groups-list-org-group-memberships + parent: org-groups + generated: true + params: + versions: + - v2 + operationids: + - 
ListOrgGroupMemberships + unstable: + - v2 + order: 6 - name: Powerpack url: /api/latest/powerpack/ identifier: powerpack diff --git a/content/en/api/latest/org-groups/_index.md b/content/en/api/latest/org-groups/_index.md new file mode 100644 index 00000000000..3eaa20f5a1e --- /dev/null +++ b/content/en/api/latest/org-groups/_index.md @@ -0,0 +1,3 @@ +--- +title: Org Groups +--- diff --git a/content/en/api/v2/observability-pipelines/examples.json b/content/en/api/v2/observability-pipelines/examples.json index bc9418d3cbb..a0f8554ef8b 100644 --- a/content/en/api/v2/observability-pipelines/examples.json +++ b/content/en/api/v2/observability-pipelines/examples.json @@ -99,7 +99,7 @@ "totalCount": 42 } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

The schema data.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Chronicle.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.

Allowed enum values: drop,no_action,overflow_routing
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.

Allowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Metadata about the response.

\n
\n
\n
\n
\n
\n

totalCount

\n
\n

int64

\n

The total number of pipelines.

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

The schema data.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Chronicle.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row (includes_headers).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  \n
  • no_action: Let the event pass through.
  \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  \n
  • no_action: Let the event pass through.
  \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Indicates whether the encoding includes headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.
\nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.
\nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Metadata about the response.

\n
\n
\n
\n
\n
\n

totalCount

\n
\n

int64

\n

The total number of pipelines.

\n
\n \n
\n
\n
\n
" }, "400": { "json": { @@ -227,7 +227,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row (includes_headers).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that are added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field received from the source should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" }, "400": { "json": { @@ -411,7 +411,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.

\nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.

\nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field received from the source should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Indicates whether the file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events). \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events). \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events). \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoding includes a header row (includes_headers).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" } }, "ValidatePipeline": { @@ -605,7 +605,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that are added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Indicates whether the encoded file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud services.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Additional allowed enum value: Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.

Allowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination.

Allowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Additional allowed enum values: longest_array, flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold is applied.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" } }, "DeletePipeline": { @@ -744,7 +744,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud services.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The delimiter character used to separate values in the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the CSV file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the column in the enrichment table to match against.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path of the field in the log event used for the lookup.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the column in the enrichment table.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field that is received from the source should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" }, "403": { "json": { @@ -864,7 +864,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that are added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The delimiter character used to separate values in the encoded file.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the column in the enrichment table to match against.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the field in the log event used for the lookup.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the column as defined in the enrichment table schema.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field received from the source should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that are added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region of the Amazon OpenSearch domain.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement: bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement: bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoding includes headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" }, "400": { "json": { @@ -1058,7 +1058,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that are added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The delimiter character used to separate values in the encoded file.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the enrichment table column to match against.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The log event field whose value is used for the lookup.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, as received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The value of the source field in log events to be processed by the Grok rules.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The elasticsearch destination writes logs or metrics to an Elasticsearch cluster.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The name of the index to write events to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for the Elasticsearch destination.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression algorithm applied when sending data to Elasticsearch. \nAllowed enum values: none,gzip,zlib,zstd,snappy

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

The compression level. Only applicable for gzip, zlib, and zstd algorithms.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

auto_routing

\n
\n

boolean

\n

When true, automatically routes events to the appropriate data stream based on the event content.

\n
\n \n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset. This groups events by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type. This determines how events are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace. This separates events into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n

sync_fields

\n
\n

boolean

\n

When true, synchronizes data stream fields with the Elasticsearch index mapping.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

id_key

\n
\n

string

\n

The name of the field used as the document ID in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

pipeline

\n
\n

string

\n

The name of an Elasticsearch ingest pipeline to apply to events before indexing.

\n
\n \n
\n
\n
\n
\n
\n

request_retry_partial

\n
\n

boolean

\n

When true, retries failed partial bulk requests when some events in a batch fail while others succeed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keep_unmatched

\n
\n

boolean

\n

Whether to keep an event that does not match any of the mapping filters.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The string used to replace matched sensitive data (for example, "***" or "[REDACTED]").

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Optional settings for the hash action. When omitted or empty, matched sensitive data is\nreplaced with a deterministic hashed value that preserves structure for analytics while\nprotecting the original content. Reserved for future hash configuration (for example, algorithm or salt).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

Number of characters to leave visible from the start or end of the matched value; the rest are redacted.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

List of log attribute names (field paths) to which the scope applies. Only these fields are included in or excluded from pattern matching.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication, TLS encryption, and configurable compression.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression format for objects retrieved from the S3 bucket. Use auto to detect compression from the object's Content-Encoding header or file extension. \nAllowed enum values: auto,none,gzip,zstd

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" } } } \ No newline at end of file diff --git a/content/en/api/v2/org-groups/_index.md b/content/en/api/v2/org-groups/_index.md new file mode 100644 index 00000000000..d590ad8913b --- /dev/null +++ b/content/en/api/v2/org-groups/_index.md @@ -0,0 +1,4 @@ +--- +title: Org Groups +headless: true +--- diff --git a/content/en/api/v2/org-groups/examples.json b/content/en/api/v2/org-groups/examples.json new file mode 100644 index 00000000000..646d048cd0d --- /dev/null +++ b/content/en/api/v2/org-groups/examples.json @@ -0,0 +1,2161 @@ +{ + "ListOrgGroupMemberships": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_name": "Acme Corp", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + ], + "meta": { + "page": { + "total_count": 42 + } + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of org group memberships.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group membership.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the membership was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the membership was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_name [required]

\n
\n

string

\n

The name of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the member organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group membership.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group membership.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Pagination metadata.

\n
\n
\n
\n
\n
\n

page [required]

\n
\n

object

\n

Page-based pagination details.

\n
\n
\n
\n
\n
\n

total_count [required]

\n
\n

int64

\n

The total number of items.

\n
\n \n
\n
\n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "BulkUpdateOrgGroupMemberships": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_name": "Acme Corp", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + ], + "meta": { + "page": { + "total_count": 42 + } + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of org group memberships.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group membership.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the membership was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the membership was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_name [required]

\n
\n

string

\n

The name of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the member organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group membership.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group membership.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Pagination metadata.

\n
\n
\n
\n
\n
\n

page [required]

\n
\n

object

\n

Page-based pagination details.

\n
\n
\n
\n
\n
\n

total_count [required]

\n
\n

int64

\n

The total number of items.

\n
\n \n
\n
\n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": { + "orgs": [ + { + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + } + ] + }, + "relationships": { + "source_org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "target_org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_membership_bulk_updates" + } + }, + "json": { + "data": { + "attributes": { + "orgs": [ + { + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + } + ] + }, + "relationships": { + "source_org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "target_org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_membership_bulk_updates" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for bulk updating org group memberships.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for bulk updating org group memberships.

\n
\n
\n
\n
\n
\n

orgs [required]

\n
\n

[object]

\n

List of organizations to move. Maximum 100 per request.

\n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

relationships [required]

\n
\n

object

\n

Relationships for bulk updating memberships.

\n
\n
\n
\n
\n
\n

source_org_group [required]

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

target_org_group [required]

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group membership bulk update resource type. \nAllowed enum values: org_group_membership_bulk_updates

\n
\n \n
\n
\n
\n
" + } + }, + "GetOrgGroupMembership": { + "responses": { + "200": { + "json": { + "data": { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_name": "Acme Corp", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group membership resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group membership.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the membership was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the membership was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_name [required]

\n
\n

string

\n

The name of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the member organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group membership.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group membership.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "UpdateOrgGroupMembership": { + "responses": { + "200": { + "json": { + "data": { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_name": "Acme Corp", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group membership resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group membership.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the membership was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the membership was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_name [required]

\n
\n

string

\n

The name of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the member organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group membership.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group membership.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + }, + "json": { + "data": { + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for updating an org group membership.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the membership.

\n
\n \n
\n
\n
\n
\n
\n

relationships [required]

\n
\n

object

\n

Relationships for updating a membership.

\n
\n
\n
\n
\n
\n

org_group [required]

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
" + } + }, + "ListOrgGroupPolicies": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "content": { + "value": "UTC" + }, + "enforced_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "policy_name": "monitor_timezone" + }, + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_policies" + } + ], + "meta": { + "page": { + "total_count": 42 + } + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of org group policies.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The policy content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

enforced_at [required]

\n
\n

date-time

\n

Timestamp when the policy was enforced.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the policy was last modified.

\n
\n \n
\n
\n
\n
\n
\n

policy_name [required]

\n
\n

string

\n

The name of the policy.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group policy.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group policy.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Pagination metadata.

\n
\n
\n
\n
\n
\n

page [required]

\n
\n

object

\n

Page-based pagination details.

\n
\n
\n
\n
\n
\n

total_count [required]

\n
\n

int64

\n

The total number of items.

\n
\n \n
\n
\n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "CreateOrgGroupPolicy": { + "responses": { + "201": { + "json": { + "data": { + "attributes": { + "content": { + "value": "UTC" + }, + "enforced_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "policy_name": "monitor_timezone" + }, + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_policies" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group policy resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The policy content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

enforced_at [required]

\n
\n

date-time

\n

Timestamp when the policy was enforced.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the policy was last modified.

\n
\n \n
\n
\n
\n
\n
\n

policy_name [required]

\n
\n

string

\n

The name of the policy.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group policy.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group policy.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "409": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": { + "content": { + "value": "UTC" + }, + "policy_name": "monitor_timezone" + }, + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_policies" + } + }, + "json": { + "data": { + "attributes": { + "content": { + "value": "UTC" + }, + "policy_name": "monitor_timezone" + }, + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_policies" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for creating an org group policy.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for creating an org group policy.

\n
\n
\n
\n
\n
\n

content [required]

\n
\n

object

\n

The policy content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

policy_name [required]

\n
\n

string

\n

The name of the policy.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

relationships [required]

\n
\n

object

\n

Relationships for creating a policy.

\n
\n
\n
\n
\n
\n

org_group [required]

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
" + } + }, + "DeleteOrgGroupPolicy": { + "responses": { + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "UpdateOrgGroupPolicy": { + "responses": { + "200": { + "json": { + "data": { + "attributes": { + "content": { + "value": "UTC" + }, + "enforced_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "policy_name": "monitor_timezone" + }, + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_policies" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group policy resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The policy content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

enforced_at [required]

\n
\n

date-time

\n

Timestamp when the policy was enforced.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the policy was last modified.

\n
\n \n
\n
\n
\n
\n
\n

policy_name [required]

\n
\n

string

\n

The name of the policy.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group policy.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group policy.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": {}, + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + }, + "json": { + "data": { + "attributes": { + "content": { + "value": "UTC" + } + }, + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for updating an org group policy.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for updating an org group policy.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The policy content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
" + } + }, + "ListOrgGroupPolicyConfigs": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "allowed_values": [ + "UTC", + "US/Eastern", + "US/Pacific" + ], + "default_value": "UTC", + "description": "The default timezone for monitors.", + "name": "monitor_timezone", + "value_type": "string" + }, + "id": "monitor_timezone", + "type": "org_group_policy_configs" + } + ] + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of org group policy configs.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy config.

\n
\n
\n
\n
\n
\n

allowed_values [required]

\n
\n

[string]

\n

The allowed values for this config.

\n
\n \n
\n
\n
\n
\n
\n

default_value [required]

\n
\n

\n

The default value for this config.

\n
\n \n
\n
\n
\n
\n
\n

description [required]

\n
\n

string

\n

The description of the policy config.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the policy config.

\n
\n \n
\n
\n
\n
\n
\n

value_type [required]

\n
\n

string

\n

The type of the value for this config.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The identifier of the policy config (uses the config name).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policy configs resource type. \nAllowed enum values: org_group_policy_configs

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "ListOrgGroupPolicyOverrides": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "content": {}, + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "9f8e7d6c-5b4a-3210-fedc-ba0987654321", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "org_group_policy": { + "data": { + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + } + }, + "type": "org_group_policy_overrides" + } + ], + "meta": { + "page": { + "total_count": 42 + } + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of org group policy overrides.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy override.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The override content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the override was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the override was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the organization that has the override.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that has the override.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy override.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group policy override.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

org_group_policy

\n
\n

object

\n

Relationship to a single org group policy.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group policy.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policy overrides resource type. \nAllowed enum values: org_group_policy_overrides

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Pagination metadata.

\n
\n
\n
\n
\n
\n

page [required]

\n
\n

object

\n

Page-based pagination details.

\n
\n
\n
\n
\n
\n

total_count [required]

\n
\n

int64

\n

The total number of items.

\n
\n \n
\n
\n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "CreateOrgGroupPolicyOverride": { + "responses": { + "201": { + "json": { + "data": { + "attributes": { + "content": {}, + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "9f8e7d6c-5b4a-3210-fedc-ba0987654321", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "org_group_policy": { + "data": { + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + } + }, + "type": "org_group_policy_overrides" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group policy override resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy override.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The override content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the override was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the override was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the organization that has the override.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that has the override.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy override.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group policy override.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

org_group_policy

\n
\n

object

\n

Relationship to a single org group policy.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group policy.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policy overrides resource type. \nAllowed enum values: org_group_policy_overrides

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "409": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": { + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "org_group_policy": { + "data": { + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + } + }, + "type": "org_group_policy_overrides" + } + }, + "json": { + "data": { + "attributes": { + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "org_group_policy": { + "data": { + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + } + }, + "type": "org_group_policy_overrides" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for creating an org group policy override.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for creating a policy override.

\n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization to grant the override.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

relationships [required]

\n
\n

object

\n

Relationships for creating a policy override.

\n
\n
\n
\n
\n
\n

org_group [required]

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

org_group_policy [required]

\n
\n

object

\n

Relationship to a single org group policy.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group policy.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policy overrides resource type. \nAllowed enum values: org_group_policy_overrides

\n
\n \n
\n
\n
\n
" + } + }, + "DeleteOrgGroupPolicyOverride": { + "responses": { + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "UpdateOrgGroupPolicyOverride": { + "responses": { + "200": { + "json": { + "data": { + "attributes": { + "content": {}, + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "9f8e7d6c-5b4a-3210-fedc-ba0987654321", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "org_group_policy": { + "data": { + "id": "1a2b3c4d-5e6f-7890-abcd-ef0123456789", + "type": "org_group_policies" + } + } + }, + "type": "org_group_policy_overrides" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group policy override resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group policy override.

\n
\n
\n
\n
\n
\n

content

\n
\n

object

\n

The override content as key-value pairs.

\n
\n \n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the override was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the override was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the organization that has the override.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that has the override.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy override.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group policy override.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

org_group_policy

\n
\n

object

\n

Relationship to a single org group policy.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group policy.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policies resource type. \nAllowed enum values: org_group_policies

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policy overrides resource type. \nAllowed enum values: org_group_policy_overrides

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": { + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "9f8e7d6c-5b4a-3210-fedc-ba0987654321", + "type": "org_group_policy_overrides" + } + }, + "json": { + "data": { + "attributes": { + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "9f8e7d6c-5b4a-3210-fedc-ba0987654321", + "type": "org_group_policy_overrides" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for updating a policy override.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for updating a policy override.

\n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the policy override.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group policy overrides resource type. \nAllowed enum values: org_group_policy_overrides

\n
\n \n
\n
\n
\n
" + } + }, + "ListOrgGroups": { + "responses": { + "200": { + "json": { + "data": [ + { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "name": "My Org Group", + "owner_org_site": "datadoghq.com", + "owner_org_uuid": "b2c3d4e5-f6a7-8901-bcde-f01234567890" + }, + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "relationships": { + "memberships": { + "data": [ + { + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "type": "org_group_memberships" + } + ] + } + }, + "type": "org_groups" + } + ], + "included": [ + { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "org_name": "Acme Corp", + "org_site": "datadoghq.com", + "org_uuid": "c3d4e5f6-a7b8-9012-cdef-012345678901" + }, + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "relationships": { + "org_group": { + "data": { + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + } + }, + "type": "org_group_memberships" + } + ], + "meta": { + "page": { + "total_count": 42 + } + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of org groups.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the org group was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the org group was last modified.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_site [required]

\n
\n

string

\n

The site of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group.

\n
\n
\n
\n
\n
\n

memberships

\n
\n

object

\n

Relationship to org group memberships.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of membership relationship references.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the membership.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

included

\n
\n

[object]

\n

Related resources included in the response when requested with the include parameter.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group membership.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the membership was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the membership was last modified.

\n
\n \n
\n
\n
\n
\n
\n

org_name [required]

\n
\n

string

\n

The name of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_site [required]

\n
\n

string

\n

The site of the member organization.

\n
\n \n
\n
\n
\n
\n
\n

org_uuid [required]

\n
\n

uuid

\n

The UUID of the member organization.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group membership.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group membership.

\n
\n
\n
\n
\n
\n

org_group

\n
\n

object

\n

Relationship to a single org group.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

object

\n

A reference to an org group.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Pagination metadata.

\n
\n
\n
\n
\n
\n

page [required]

\n
\n

object

\n

Page-based pagination details.

\n
\n
\n
\n
\n
\n

total_count [required]

\n
\n

int64

\n

The total number of items.

\n
\n \n
\n
\n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "CreateOrgGroup": { + "responses": { + "201": { + "json": { + "data": { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "name": "My Org Group", + "owner_org_site": "datadoghq.com", + "owner_org_uuid": "b2c3d4e5-f6a7-8901-bcde-f01234567890" + }, + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "relationships": { + "memberships": { + "data": [ + { + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "type": "org_group_memberships" + } + ] + } + }, + "type": "org_groups" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the org group was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the org group was last modified.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_site [required]

\n
\n

string

\n

The site of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group.

\n
\n
\n
\n
\n
\n

memberships

\n
\n

object

\n

Relationship to org group memberships.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of membership relationship references.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the membership.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "409": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": { + "name": "My Org Group" + }, + "type": "org_groups" + } + }, + "json": { + "data": { + "attributes": { + "name": "My Org Group" + }, + "type": "org_groups" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for creating an org group.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for creating an org group.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the org group.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
" + } + }, + "DeleteOrgGroup": { + "responses": { + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "GetOrgGroup": { + "responses": { + "200": { + "json": { + "data": { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "name": "My Org Group", + "owner_org_site": "datadoghq.com", + "owner_org_uuid": "b2c3d4e5-f6a7-8901-bcde-f01234567890" + }, + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "relationships": { + "memberships": { + "data": [ + { + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "type": "org_group_memberships" + } + ] + } + }, + "type": "org_groups" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the org group was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the org group was last modified.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_site [required]

\n
\n

string

\n

The site of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group.

\n
\n
\n
\n
\n
\n

memberships

\n
\n

object

\n

Relationship to org group memberships.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of membership relationship references.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the membership.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": {}, + "json": {}, + "html": "" + } + }, + "UpdateOrgGroup": { + "responses": { + "200": { + "json": { + "data": { + "attributes": { + "created_at": "2024-01-15T10:30:00Z", + "modified_at": "2024-01-15T10:30:00Z", + "name": "My Org Group", + "owner_org_site": "datadoghq.com", + "owner_org_uuid": "b2c3d4e5-f6a7-8901-bcde-f01234567890" + }, + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "relationships": { + "memberships": { + "data": [ + { + "id": "f1e2d3c4-b5a6-7890-1234-567890abcdef", + "type": "org_group_memberships" + } + ] + } + }, + "type": "org_groups" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

An org group resource.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes of an org group.

\n
\n
\n
\n
\n
\n

created_at [required]

\n
\n

date-time

\n

Timestamp when the org group was created.

\n
\n \n
\n
\n
\n
\n
\n

modified_at [required]

\n
\n

date-time

\n

Timestamp when the org group was last modified.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_site [required]

\n
\n

string

\n

The site of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n

owner_org_uuid [required]

\n
\n

uuid

\n

The UUID of the organization that owns this org group.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

relationships

\n
\n

object

\n

Relationships of an org group.

\n
\n
\n
\n
\n
\n

memberships

\n
\n

object

\n

Relationship to org group memberships.

\n
\n
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

An array of membership relationship references.

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the membership.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org group memberships resource type. \nAllowed enum values: org_group_memberships

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
" + }, + "400": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "401": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "403": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "404": { + "json": { + "errors": [ + { + "detail": "Missing required attribute in body", + "meta": {}, + "source": { + "header": "Authorization", + "parameter": "limit", + "pointer": "/data/attributes/title" + }, + "status": "400", + "title": "Bad Request" + } + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[object]

\n

A list of errors.

\n
\n
\n
\n
\n
\n

detail

\n
\n

string

\n

A human-readable explanation specific to this occurrence of the error.

\n
\n \n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Non-standard meta-information about the error

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

object

\n

References to the source of the error.

\n
\n
\n
\n
\n
\n

header

\n
\n

string

\n

A string indicating the name of a single request header which caused the error.

\n
\n \n
\n
\n
\n
\n
\n

parameter

\n
\n

string

\n

A string indicating which URI query parameter caused the error.

\n
\n \n
\n
\n
\n
\n
\n

pointer

\n
\n

string

\n

A JSON pointer to the value in the request document that caused the error.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

status

\n
\n

string

\n

Status code of the response.

\n
\n \n
\n
\n
\n
\n
\n

title

\n
\n

string

\n

Short human-readable summary of the error.

\n
\n \n
\n
\n
\n
" + }, + "429": { + "json": { + "errors": [ + "Bad Request" + ] + }, + "html": "
\n
\n
\n
\n

errors [required]

\n
\n

[string]

\n

A list of errors.

\n
\n \n
\n
" + } + }, + "request": { + "json_curl": { + "data": { + "attributes": { + "name": "Updated Org Group Name" + }, + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "json": { + "data": { + "attributes": { + "name": "Updated Org Group Name" + }, + "id": "a1b2c3d4-e5f6-7890-abcd-ef0123456789", + "type": "org_groups" + } + }, + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Data for updating an org group.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Attributes for updating an org group.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the org group.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

uuid

\n

The ID of the org group.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Org groups resource type. \nAllowed enum values: org_groups

\n
\n \n
\n
\n
\n
" + } + } +} \ No newline at end of file diff --git a/data/api/v2/full_spec.yaml b/data/api/v2/full_spec.yaml index c0961d68ec6..048c81e3198 100644 --- a/data/api/v2/full_spec.yaml +++ b/data/api/v2/full_spec.yaml @@ -790,6 +790,14 @@ components: required: true schema: type: string + MembershipSort: + description: >- + Field to sort memberships by. Supported values: `name`, `uuid`, `-name`, `-uuid`. Defaults to `uuid`. + in: query + name: sort + required: false + schema: + $ref: "#/components/schemas/OrgGroupMembershipSortOption" MetricID: description: The name of the log-based metric. in: path @@ -921,6 +929,148 @@ components: example: "f9ec96b0-8c8a-4b0a-9b0a-1b2c3d4e5f6a" format: uuid type: string + OrgGroupId: + description: The ID of the org group. + in: path + name: org_group_id + required: true + schema: + example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + format: uuid + type: string + OrgGroupInclude: + description: >- + List of related resources to include. + explode: false + in: query + name: include + required: false + schema: + example: + - memberships + items: + $ref: "#/components/schemas/OrgGroupIncludeOption" + type: array + style: form + OrgGroupMembershipFilterOrgGroupId: + description: Filter memberships by org group ID. Required when `filter[org_uuid]` is not provided. + in: query + name: filter[org_group_id] + required: false + schema: + example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + format: uuid + type: string + OrgGroupMembershipFilterOrgUuid: + description: Filter memberships by org UUID. Returns a single-item list. + in: query + name: filter[org_uuid] + required: false + schema: + example: "b2c3d4e5-f6a7-8901-bcde-f01234567890" + format: uuid + type: string + OrgGroupMembershipId: + description: The ID of the org group membership. 
+ in: path + name: org_group_membership_id + required: true + schema: + example: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + format: uuid + type: string + OrgGroupPageNumber: + description: The page number to return. + in: query + name: page[number] + required: false + schema: + default: 0 + example: 0 + format: int64 + minimum: 0 + type: integer + OrgGroupPageSize: + description: The number of items per page. Maximum is 1000. + in: query + name: page[size] + required: false + schema: + default: 50 + example: 50 + format: int64 + maximum: 1000 + minimum: 1 + type: integer + OrgGroupPolicyFilterOrgGroupId: + description: Filter policies by org group ID. + in: query + name: filter[org_group_id] + required: true + schema: + example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + format: uuid + type: string + OrgGroupPolicyFilterPolicyName: + description: Filter policies by policy name. + in: query + name: filter[policy_name] + required: false + schema: + example: monitor_timezone + type: string + OrgGroupPolicyId: + description: The ID of the org group policy. + in: path + name: org_group_policy_id + required: true + schema: + example: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + format: uuid + type: string + OrgGroupPolicyOverrideFilterOrgGroupId: + description: Filter policy overrides by org group ID. + in: query + name: filter[org_group_id] + required: true + schema: + example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + format: uuid + type: string + OrgGroupPolicyOverrideFilterPolicyId: + description: Filter policy overrides by policy ID. + in: query + name: filter[policy_id] + required: false + schema: + example: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + format: uuid + type: string + OrgGroupPolicyOverrideId: + description: The ID of the org group policy override. + in: path + name: org_group_policy_override_id + required: true + schema: + example: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" + format: uuid + type: string + OrgGroupSort: + description: >- + Field to sort org groups by. 
Supported values: `name`, `uuid`, `-name`, `-uuid`. Defaults to `uuid`. + in: query + name: sort + required: false + schema: + $ref: "#/components/schemas/OrgGroupSortOption" + OverrideSort: + description: >- + Field to sort overrides by. Supported values: `id`, `org_uuid`, `-id`, `-org_uuid`. Defaults to `id`. + in: query + name: sort + required: false + schema: + $ref: "#/components/schemas/OrgGroupPolicyOverrideSortOption" PageNumber: description: Specific page number to return. in: query @@ -959,6 +1109,14 @@ components: required: true schema: type: string + PolicySort: + description: >- + Field to sort policies by. Supported values: `id`, `name`, `-id`, `-name`. Defaults to `id`. + in: query + name: sort + required: false + schema: + $ref: "#/components/schemas/OrgGroupPolicySortOption" ProjectIDPathParameter: description: Project UUID. example: "e555e290-ed65-49bd-ae18-8acbfcf18db7" @@ -29703,6 +29861,22 @@ components: type: string x-enum-varnames: - INCIDENTS_GLOBAL_SETTINGS + GlobalOrgIdentifier: + description: A unique identifier for an organization including its site. + properties: + org_site: + description: The site of the organization. + example: "datadoghq.com" + type: string + org_uuid: + description: The UUID of the organization. + example: "c3d4e5f6-a7b8-9012-cdef-012345678901" + format: uuid + type: string + required: + - org_uuid + - org_site + type: object GlobalVariableData: description: Synthetics global variable data. Wrapper around the global variable object. properties: @@ -46430,7 +46604,7 @@ components: $ref: "#/components/schemas/ObservabilityPipelineParseGrokProcessorRuleMatchRule" type: array source: - description: The value of the source field in log events to be processed by the Grok rules. + description: The name of the field in the log event to apply the Grok rules to. 
example: "message" type: string support_rules: @@ -49144,851 +49318,1743 @@ components: type: string x-enum-varnames: - USERS - Organization: - description: Organization object. - properties: - attributes: - $ref: "#/components/schemas/OrganizationAttributes" - id: - description: ID of the organization. - type: string - type: - $ref: "#/components/schemas/OrganizationsType" - required: - - type - type: object - OrganizationAttributes: - description: Attributes of the organization. + OrgGroupAttributes: + description: Attributes of an org group. properties: created_at: - description: Creation time of the organization. + description: Timestamp when the org group was created. + example: "2024-01-15T10:30:00Z" format: date-time type: string - description: - description: Description of the organization. - type: string - disabled: - description: Whether or not the organization is disabled. - type: boolean modified_at: - description: Time of last organization modification. + description: Timestamp when the org group was last modified. + example: "2024-01-15T10:30:00Z" format: date-time type: string name: - description: Name of the organization. - type: string - public_id: - description: Public ID of the organization. + description: The name of the org group. + example: "My Org Group" type: string - sharing: - description: Sharing type of the organization. + owner_org_site: + description: The site of the organization that owns this org group. + example: "datadoghq.com" type: string - url: - description: URL of the site that this organization exists at. + owner_org_uuid: + description: The UUID of the organization that owns this org group. + example: "b2c3d4e5-f6a7-8901-bcde-f01234567890" + format: uuid type: string + required: + - name + - owner_org_uuid + - owner_org_site + - created_at + - modified_at type: object - OrganizationsType: - default: orgs - description: Organizations resource type. 
- enum: - - orgs - example: orgs - type: string - x-enum-varnames: - - ORGS - OutboundEdge: - description: The definition of `OutboundEdge` object. + OrgGroupCreateAttributes: + description: Attributes for creating an org group. properties: - branchName: - description: The `OutboundEdge` `branchName`. - example: "" - type: string - nextStepName: - description: The `OutboundEdge` `nextStepName`. - example: "" + name: + description: The name of the org group. + example: "My Org Group" type: string required: - - nextStepName - - branchName + - name type: object - OutcomeType: - default: outcome - description: The JSON:API type for an outcome. - enum: - - outcome - example: outcome - type: string - x-enum-varnames: - - OUTCOME - OutcomesBatchAttributes: - description: The JSON:API attributes for a batched set of scorecard outcomes. + OrgGroupCreateData: + description: Data for creating an org group. properties: - results: - description: Set of scorecard outcomes to update. - items: - $ref: "#/components/schemas/OutcomesBatchRequestItem" - type: array + attributes: + $ref: "#/components/schemas/OrgGroupCreateAttributes" + type: + $ref: "#/components/schemas/OrgGroupType" + required: + - type + - attributes type: object - OutcomesBatchRequest: - description: Scorecard outcomes batch request. + OrgGroupCreateRequest: + description: Request to create an org group. properties: data: - $ref: "#/components/schemas/OutcomesBatchRequestData" + $ref: "#/components/schemas/OrgGroupCreateData" + required: + - data type: object - OutcomesBatchRequestData: - description: Scorecard outcomes batch request data. + OrgGroupData: + description: An org group resource. properties: attributes: - $ref: "#/components/schemas/OutcomesBatchAttributes" - type: - $ref: "#/components/schemas/OutcomesBatchType" - type: object - OutcomesBatchRequestItem: - description: Scorecard outcome for a specific rule, for a given service within a batched update. 
- properties: - remarks: - description: >- - Any remarks regarding the scorecard rule's evaluation, and supports HTML hyperlinks. - example: 'See: Services' - type: string - rule_id: - $ref: "#/components/schemas/RuleId" - service_name: - description: The unique name for a service in the catalog. - example: my-service + $ref: "#/components/schemas/OrgGroupAttributes" + id: + description: The ID of the org group. + example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + format: uuid type: string - state: - $ref: "#/components/schemas/State" + relationships: + $ref: "#/components/schemas/OrgGroupRelationships" + type: + $ref: "#/components/schemas/OrgGroupType" required: - - rule_id - - service_name - - state + - id + - type + - attributes type: object - OutcomesBatchResponse: - description: Scorecard outcomes batch response. + OrgGroupIncludeOption: + description: Allowed include options for org group endpoints. + enum: + - memberships + type: string + x-enum-varnames: + - MEMBERSHIPS + OrgGroupListResponse: + description: Response containing a list of org groups. properties: data: - $ref: "#/components/schemas/OutcomesBatchResponseData" - example: - - attributes: - service_name: my-service - state: pass - id: "outcome-abc123" - type: rule-outcome + description: An array of org groups. + items: + $ref: "#/components/schemas/OrgGroupData" + type: array + included: + description: Related resources included in the response when requested with the `include` parameter. + items: + $ref: "#/components/schemas/OrgGroupMembershipData" + type: array meta: - $ref: "#/components/schemas/OutcomesBatchResponseMeta" + $ref: "#/components/schemas/OrgGroupPaginationMeta" required: - data - - meta type: object - OutcomesBatchResponseAttributes: - description: The JSON:API attributes for an outcome. + OrgGroupMembershipAttributes: + description: Attributes of an org group membership. properties: created_at: - description: Creation time of the rule outcome. 
+ description: Timestamp when the membership was created. + example: "2024-01-15T10:30:00Z" format: date-time type: string modified_at: - description: Time of last rule outcome modification. + description: Timestamp when the membership was last modified. + example: "2024-01-15T10:30:00Z" format: date-time type: string - remarks: - description: >- - Any remarks regarding the scorecard rule's evaluation, and supports HTML hyperlinks. - example: 'See: Services' + org_name: + description: The name of the member organization. + example: "Acme Corp" type: string - service_name: - description: The unique name for a service in the catalog. - example: my-service + org_site: + description: The site of the member organization. + example: "datadoghq.com" type: string - state: - $ref: "#/components/schemas/State" + org_uuid: + description: The UUID of the member organization. + example: "c3d4e5f6-a7b8-9012-cdef-012345678901" + format: uuid + type: string + required: + - org_name + - org_uuid + - org_site + - created_at + - modified_at type: object - OutcomesBatchResponseData: - description: List of rule outcomes which were affected during the bulk operation. - items: - $ref: "#/components/schemas/OutcomesResponseDataItem" - type: array - OutcomesBatchResponseMeta: - description: Metadata pertaining to the bulk operation. + OrgGroupMembershipBulkUpdateAttributes: + description: Attributes for bulk updating org group memberships. properties: - total_received: - description: Total number of scorecard results received during the bulk operation. - format: int64 - type: integer - total_updated: - description: Total number of scorecard results modified during the bulk operation. - format: int64 - type: integer + orgs: + description: List of organizations to move. Maximum 100 per request. 
+ items: + $ref: "#/components/schemas/GlobalOrgIdentifier" + type: array + required: + - orgs type: object - OutcomesBatchType: - default: batched-outcome - description: The JSON:API type for scorecard outcomes. - enum: [batched-outcome] - example: batched-outcome - type: string - x-enum-varnames: [BATCHED_OUTCOME] - OutcomesResponse: - description: Scorecard outcomes - the result of a rule for a service. + OrgGroupMembershipBulkUpdateData: + description: Data for bulk updating org group memberships. + properties: + attributes: + $ref: "#/components/schemas/OrgGroupMembershipBulkUpdateAttributes" + relationships: + $ref: "#/components/schemas/OrgGroupMembershipBulkUpdateRelationships" + type: + $ref: "#/components/schemas/OrgGroupMembershipBulkUpdateType" + required: + - type + - attributes + - relationships + type: object + OrgGroupMembershipBulkUpdateRelationships: + description: Relationships for bulk updating memberships. + properties: + source_org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" + target_org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" + required: + - source_org_group + - target_org_group + type: object + OrgGroupMembershipBulkUpdateRequest: + description: Request to bulk update org group memberships. properties: data: - $ref: "#/components/schemas/OutcomesResponseData" - included: - $ref: "#/components/schemas/OutcomesResponseIncluded" - links: - $ref: "#/components/schemas/OutcomesResponseLinks" + $ref: "#/components/schemas/OrgGroupMembershipBulkUpdateData" + required: + - data type: object - OutcomesResponseData: - description: List of rule outcomes. - items: - $ref: "#/components/schemas/OutcomesResponseDataItem" - type: array - OutcomesResponseDataItem: - description: A single rule outcome. + OrgGroupMembershipBulkUpdateType: + description: Org group membership bulk update resource type. 
+ enum: + - org_group_membership_bulk_updates + example: org_group_membership_bulk_updates + type: string + x-enum-varnames: + - ORG_GROUP_MEMBERSHIP_BULK_UPDATES + OrgGroupMembershipData: + description: An org group membership resource. properties: attributes: - $ref: "#/components/schemas/OutcomesBatchResponseAttributes" + $ref: "#/components/schemas/OrgGroupMembershipAttributes" id: - description: The unique ID for a rule outcome. + description: The ID of the org group membership. + example: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + format: uuid type: string relationships: - $ref: "#/components/schemas/RuleOutcomeRelationships" + $ref: "#/components/schemas/OrgGroupMembershipRelationships" type: - $ref: "#/components/schemas/OutcomeType" + $ref: "#/components/schemas/OrgGroupMembershipType" + required: + - id + - type + - attributes type: object - OutcomesResponseIncluded: - description: Array of rule details. - items: - $ref: "#/components/schemas/OutcomesResponseIncludedItem" - type: array - OutcomesResponseIncludedItem: - description: Attributes of the included rule. + OrgGroupMembershipListResponse: + description: Response containing a list of org group memberships. + properties: + data: + description: An array of org group memberships. + items: + $ref: "#/components/schemas/OrgGroupMembershipData" + type: array + meta: + $ref: "#/components/schemas/OrgGroupPaginationMeta" + required: + - data + type: object + OrgGroupMembershipRelationshipData: + description: A reference to an org group membership. properties: - attributes: - $ref: "#/components/schemas/OutcomesResponseIncludedRuleAttributes" id: - $ref: "#/components/schemas/RuleId" + description: The ID of the membership. 
+ example: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + format: uuid + type: string type: - $ref: "#/components/schemas/RuleType" + $ref: "#/components/schemas/OrgGroupMembershipType" + required: + - id + - type type: object - OutcomesResponseIncludedRuleAttributes: - description: Details of a rule. + OrgGroupMembershipRelationships: + description: Relationships of an org group membership. properties: - name: - description: Name of the rule. - example: Team Defined - type: string - scorecard_name: - description: The scorecard name to which this rule must belong. - example: Observability Best Practices - type: string + org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" type: object - OutcomesResponseLinks: - description: Links attributes. + OrgGroupMembershipResponse: + description: Response containing a single org group membership. properties: - next: - description: |- - Link for the next set of results. - example: "/api/v2/scorecard/outcomes?include=rule&page%5Blimit%5D=100&page%5Boffset%5D=100" + data: + $ref: "#/components/schemas/OrgGroupMembershipData" + required: + - data + type: object + OrgGroupMembershipSortOption: + default: uuid + description: Field to sort memberships by. + enum: + - name + - -name + - uuid + - -uuid + example: uuid + type: string + x-enum-varnames: + - NAME + - MINUS_NAME + - UUID + - MINUS_UUID + OrgGroupMembershipType: + description: Org group memberships resource type. + enum: + - org_group_memberships + example: org_group_memberships + type: string + x-enum-varnames: + - ORG_GROUP_MEMBERSHIPS + OrgGroupMembershipUpdateData: + description: Data for updating an org group membership. + properties: + id: + description: The ID of the membership. 
+ example: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + format: uuid type: string + relationships: + $ref: "#/components/schemas/OrgGroupMembershipUpdateRelationships" + type: + $ref: "#/components/schemas/OrgGroupMembershipType" + required: + - id + - type + - relationships type: object - OutputSchema: - description: "A list of output parameters for the workflow." + OrgGroupMembershipUpdateRelationships: + description: Relationships for updating a membership. properties: - parameters: - description: The `OutputSchema` `parameters`. + org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" + required: + - org_group + type: object + OrgGroupMembershipUpdateRequest: + description: Request to update an org group membership. + properties: + data: + $ref: "#/components/schemas/OrgGroupMembershipUpdateData" + required: + - data + type: object + OrgGroupMembershipsRelationship: + description: Relationship to org group memberships. + properties: + data: + description: An array of membership relationship references. items: - $ref: "#/components/schemas/OutputSchemaParameters" + $ref: "#/components/schemas/OrgGroupMembershipRelationshipData" type: array + required: + - data type: object - OutputSchemaParameters: - description: The definition of `OutputSchemaParameters` object. + OrgGroupPaginationMeta: + description: Pagination metadata. properties: - defaultValue: - description: The `OutputSchemaParameters` `defaultValue`. - description: - description: The `OutputSchemaParameters` `description`. + page: + $ref: "#/components/schemas/OrgGroupPaginationMetaPage" + required: + - page + type: object + OrgGroupPaginationMetaPage: + description: Page-based pagination details. + properties: + total_count: + description: The total number of items. + example: 42 + format: int64 + type: integer + required: + - total_count + type: object + OrgGroupPolicyAttributes: + description: Attributes of an org group policy. 
+ properties: + content: + additionalProperties: {} + description: The policy content as key-value pairs. + example: + value: "UTC" + type: object + enforced_at: + description: Timestamp when the policy was enforced. + example: "2024-01-15T10:30:00Z" + format: date-time type: string - label: - description: The `OutputSchemaParameters` `label`. + modified_at: + description: Timestamp when the policy was last modified. + example: "2024-01-15T10:30:00Z" + format: date-time + type: string + policy_name: + description: The name of the policy. + example: "monitor_timezone" + type: string + required: + - policy_name + - enforced_at + - modified_at + type: object + OrgGroupPolicyConfigAttributes: + description: Attributes of an org group policy config. + properties: + allowed_values: + description: The allowed values for this config. + example: ["UTC", "US/Eastern", "US/Pacific"] + items: + description: An allowed value for this config. + type: string + type: array + default_value: + description: The default value for this config. + example: "UTC" + description: + description: The description of the policy config. + example: "The default timezone for monitors." type: string name: - description: The `OutputSchemaParameters` `name`. - example: "" + description: The name of the policy config. + example: "monitor_timezone" + type: string + value_type: + description: The type of the value for this config. + example: "string" type: string - type: - $ref: "#/components/schemas/OutputSchemaParametersType" - value: - description: The `OutputSchemaParameters` `value`. required: - name + - description + - value_type + - allowed_values + - default_value + type: object + OrgGroupPolicyConfigData: + description: An org group policy config resource. + properties: + attributes: + $ref: "#/components/schemas/OrgGroupPolicyConfigAttributes" + id: + description: The identifier of the policy config (uses the config name). 
+ example: "monitor_timezone" + type: string + type: + $ref: "#/components/schemas/OrgGroupPolicyConfigType" + required: + - id - type + - attributes type: object - OutputSchemaParametersType: - description: The definition of `OutputSchemaParametersType` object. - enum: - - STRING - - NUMBER - - BOOLEAN - - OBJECT - - ARRAY_STRING - - ARRAY_NUMBER - - ARRAY_BOOLEAN - - ARRAY_OBJECT - example: STRING - type: string - x-enum-varnames: - - STRING - - NUMBER - - BOOLEAN - - OBJECT - - ARRAY_STRING - - ARRAY_NUMBER - - ARRAY_BOOLEAN - - ARRAY_OBJECT - OverwriteAllocationsRequest: - description: Request to overwrite targeting rules (allocations) for a feature flag in an environment. + OrgGroupPolicyConfigListResponse: + description: Response containing a list of org group policy configs. properties: data: - description: Targeting rules (allocations) to replace existing ones with. + description: An array of org group policy configs. items: - $ref: "#/components/schemas/AllocationDataRequest" + $ref: "#/components/schemas/OrgGroupPolicyConfigData" type: array required: - data type: object - PageUrgency: - default: high - description: On-Call Page urgency level. + OrgGroupPolicyConfigType: + description: Org group policy configs resource type. enum: - - low - - high - example: high + - org_group_policy_configs + example: org_group_policy_configs type: string x-enum-varnames: - - LOW - - HIGH - PaginatedResponseMeta: - description: Metadata for scores response. + - ORG_GROUP_POLICY_CONFIGS + OrgGroupPolicyCreateAttributes: + description: Attributes for creating an org group policy. properties: - count: - description: Number of entities in this response. - example: 10 - format: int64 - type: integer - limit: - description: Pagination limit. - example: 10 - format: int64 - type: integer - offset: - description: Pagination offset. - example: 0 - format: int64 - type: integer - total: - description: Total number of entities available. 
- example: 150 - format: int64 - type: integer + content: + additionalProperties: {} + description: The policy content as key-value pairs. + example: + value: "UTC" + type: object + policy_name: + description: The name of the policy. + example: "monitor_timezone" + type: string required: - - count - - total - - limit - - offset - type: object - Pagination: - description: Pagination object. - properties: - total_count: - description: Total count. - format: int64 - type: integer - total_filtered_count: - description: Total count of elements matched by the filter. - format: int64 - type: integer + - policy_name + - content type: object - PaginationMeta: - description: Response metadata. + OrgGroupPolicyCreateData: + description: Data for creating an org group policy. properties: - page: - $ref: "#/components/schemas/PaginationMetaPage" - readOnly: true + attributes: + $ref: "#/components/schemas/OrgGroupPolicyCreateAttributes" + relationships: + $ref: "#/components/schemas/OrgGroupPolicyCreateRelationships" + type: + $ref: "#/components/schemas/OrgGroupPolicyType" + required: + - type + - attributes + - relationships type: object - PaginationMetaPage: - description: Offset-based pagination schema. - example: - first_offset: 0 - last_offset: 900 - limit: 100 - next_offset: 100 - offset: 0 - prev_offset: 100 - total: 1000 - type: offset_limit + OrgGroupPolicyCreateRelationships: + description: Relationships for creating a policy. properties: - first_offset: - description: Integer representing the offset to fetch the first page of results. - example: 0 - format: int64 - type: integer - last_offset: - description: Integer representing the offset to fetch the last page of results. - example: 900 - format: int64 - nullable: true - type: integer - limit: - description: Integer representing the number of elements to be returned in the results. 
- example: 100 - format: int64 - type: integer - next_offset: - description: >- - Integer representing the index of the first element in the next page of results. Equal to page size added to the current offset. - example: 100 - format: int64 - nullable: true - type: integer - offset: - description: Integer representing the index of the first element in the results. - example: 0 - format: int64 - type: integer - prev_offset: - description: Integer representing the index of the first element in the previous page of results. - example: 100 - format: int64 - nullable: true - type: integer - total: - description: Integer representing the total number of elements available. - example: 1000 - format: int64 - nullable: true - type: integer - type: - $ref: "#/components/schemas/PaginationMetaPageType" + org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" + required: + - org_group type: object - PaginationMetaPageType: - default: offset_limit - description: The pagination type used for offset-based pagination. - enum: - - offset_limit - example: offset_limit - type: string - x-enum-varnames: - - OFFSET_LIMIT - Parameter: - description: The definition of `Parameter` object. + OrgGroupPolicyCreateRequest: + description: Request to create an org group policy. properties: - name: - description: The `Parameter` `name`. - example: "" - type: string - value: - description: The `Parameter` `value`. + data: + $ref: "#/components/schemas/OrgGroupPolicyCreateData" required: - - name - - value + - data type: object - PartialAPIKey: - description: Partial Datadog API key. + OrgGroupPolicyData: + description: An org group policy resource. properties: attributes: - $ref: "#/components/schemas/PartialAPIKeyAttributes" + $ref: "#/components/schemas/OrgGroupPolicyAttributes" id: - description: ID of the API key. + description: The ID of the org group policy. 
+ example: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + format: uuid type: string relationships: - $ref: "#/components/schemas/APIKeyRelationships" + $ref: "#/components/schemas/OrgGroupPolicyRelationships" type: - $ref: "#/components/schemas/APIKeysType" + $ref: "#/components/schemas/OrgGroupPolicyType" + required: + - id + - type + - attributes type: object - PartialAPIKeyAttributes: - description: Attributes of a partial API key. + OrgGroupPolicyListResponse: + description: Response containing a list of org group policies. properties: - category: - description: The category of the API key. - type: string + data: + description: An array of org group policies. + items: + $ref: "#/components/schemas/OrgGroupPolicyData" + type: array + meta: + $ref: "#/components/schemas/OrgGroupPaginationMeta" + required: + - data + type: object + OrgGroupPolicyOverrideAttributes: + description: Attributes of an org group policy override. + properties: + content: + additionalProperties: {} + description: The override content as key-value pairs. + type: object created_at: - description: Creation date of the API key. - example: "2020-11-23T10:00:00.000Z" - readOnly: true + description: Timestamp when the override was created. + example: "2024-01-15T10:30:00Z" + format: date-time type: string - date_last_used: - description: Date the API Key was last used. - example: "2020-11-27T10:00:00.000Z" + modified_at: + description: Timestamp when the override was last modified. + example: "2024-01-15T10:30:00Z" format: date-time - nullable: true - readOnly: true type: string - last4: - description: The last four characters of the API key. - example: "abcd" - maxLength: 4 - minLength: 4 - readOnly: true + org_site: + description: The site of the organization that has the override. + example: "datadoghq.com" type: string - modified_at: - description: Date the API key was last modified. 
- example: "2020-11-23T10:00:00.000Z" - readOnly: true + org_uuid: + description: The UUID of the organization that has the override. + example: "c3d4e5f6-a7b8-9012-cdef-012345678901" + format: uuid type: string - name: - description: Name of the API key. - example: "API Key for submitting metrics" + required: + - org_uuid + - org_site + - created_at + - modified_at + type: object + OrgGroupPolicyOverrideCreateAttributes: + description: Attributes for creating a policy override. + properties: + org_site: + description: The site of the organization. + example: "datadoghq.com" type: string - remote_config_read_enabled: - description: The remote config read enabled status. - type: boolean + org_uuid: + description: The UUID of the organization to grant the override. + example: "c3d4e5f6-a7b8-9012-cdef-012345678901" + format: uuid + type: string + required: + - org_uuid + - org_site type: object - PartialApplicationKey: - description: Partial Datadog application key. + OrgGroupPolicyOverrideCreateData: + description: Data for creating an org group policy override. properties: attributes: - $ref: "#/components/schemas/PartialApplicationKeyAttributes" - id: - description: ID of the application key. - type: string + $ref: "#/components/schemas/OrgGroupPolicyOverrideCreateAttributes" relationships: - $ref: "#/components/schemas/ApplicationKeyRelationships" + $ref: "#/components/schemas/OrgGroupPolicyOverrideCreateRelationships" type: - $ref: "#/components/schemas/ApplicationKeysType" - type: object - PartialApplicationKeyAttributes: - description: Attributes of a partial application key. - properties: - created_at: - description: Creation date of the application key. - example: "2020-11-23T10:00:00.000Z" - readOnly: true - type: string - last4: - description: The last four characters of the application key. - example: "abcd" - maxLength: 4 - minLength: 4 - readOnly: true - type: string - last_used_at: - description: Last usage timestamp of the application key. 
- example: "2020-12-20T10:00:00.000Z" - nullable: true - readOnly: true - type: string - name: - description: Name of the application key. - example: "Application Key for managing dashboards" - type: string - scopes: - description: Array of scopes to grant the application key. - example: ["dashboards_read", "dashboards_write", "dashboards_public_share"] - items: - description: Name of scope. - type: string - nullable: true - type: array + $ref: "#/components/schemas/OrgGroupPolicyOverrideType" + required: + - type + - attributes + - relationships type: object - PartialApplicationKeyResponse: - description: Response for retrieving a partial application key. + OrgGroupPolicyOverrideCreateRelationships: + description: Relationships for creating a policy override. properties: - data: - $ref: "#/components/schemas/PartialApplicationKey" - included: - description: Array of objects related to the application key. - items: - $ref: "#/components/schemas/ApplicationKeyResponseIncludedItem" - type: array + org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" + org_group_policy: + $ref: "#/components/schemas/OrgGroupPolicyRelationshipToOne" + required: + - org_group + - org_group_policy type: object - PatchAttachmentRequest: - description: Request to update an attachment. + OrgGroupPolicyOverrideCreateRequest: + description: Request to create an org group policy override. properties: data: - $ref: "#/components/schemas/PatchAttachmentRequestData" + $ref: "#/components/schemas/OrgGroupPolicyOverrideCreateData" + required: + - data type: object - PatchAttachmentRequestData: - description: Attachment data for an update request. + OrgGroupPolicyOverrideData: + description: An org group policy override resource. properties: attributes: - $ref: "#/components/schemas/PatchAttachmentRequestDataAttributes" + $ref: "#/components/schemas/OrgGroupPolicyOverrideAttributes" id: - description: The unique identifier of the attachment. 
- example: "00000000-abcd-0002-0000-000000000000" + description: The ID of the policy override. + example: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" + format: uuid type: string + relationships: + $ref: "#/components/schemas/OrgGroupPolicyOverrideRelationships" type: - $ref: "#/components/schemas/IncidentAttachmentType" + $ref: "#/components/schemas/OrgGroupPolicyOverrideType" required: + - id - type + - attributes type: object - PatchAttachmentRequestDataAttributes: - description: The attributes for updating an attachment. + OrgGroupPolicyOverrideListResponse: + description: Response containing a list of org group policy overrides. properties: - attachment: - $ref: "#/components/schemas/PatchAttachmentRequestDataAttributesAttachment" + data: + description: An array of org group policy overrides. + items: + $ref: "#/components/schemas/OrgGroupPolicyOverrideData" + type: array + meta: + $ref: "#/components/schemas/OrgGroupPaginationMeta" + required: + - data type: object - PatchAttachmentRequestDataAttributesAttachment: - description: The updated attachment object. + OrgGroupPolicyOverrideRelationships: + description: Relationships of an org group policy override. properties: - documentUrl: - description: The updated URL for the attachment. - example: https://app.datadoghq.com/notebook/124/Postmortem-IR-124 - type: string - title: - description: The updated title for the attachment. - example: Postmortem-IR-124 - type: string + org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" + org_group_policy: + $ref: "#/components/schemas/OrgGroupPolicyRelationshipToOne" type: object - PatchComponentRequest: - description: Request object for updating a component. - example: - data: - attributes: - name: Metrics Intake Service - position: 4 - id: 1234abcd-12ab-34cd-56ef-123456abcdef - type: components + OrgGroupPolicyOverrideResponse: + description: Response containing a single org group policy override. 
properties: data: - $ref: "#/components/schemas/PatchComponentRequestData" + $ref: "#/components/schemas/OrgGroupPolicyOverrideData" + required: + - data type: object - PatchComponentRequestData: - description: The data object for updating a component. + OrgGroupPolicyOverrideSortOption: + default: id + description: Field to sort overrides by. + enum: + - id + - -id + - org_uuid + - -org_uuid + example: id + type: string + x-enum-varnames: + - ID + - MINUS_ID + - ORG_UUID + - MINUS_ORG_UUID + OrgGroupPolicyOverrideType: + description: Org group policy overrides resource type. + enum: + - org_group_policy_overrides + example: org_group_policy_overrides + type: string + x-enum-varnames: + - ORG_GROUP_POLICY_OVERRIDES + OrgGroupPolicyOverrideUpdateAttributes: + description: Attributes for updating a policy override. + properties: + org_site: + description: The site of the organization. + example: "datadoghq.com" + type: string + org_uuid: + description: The UUID of the organization. + example: "c3d4e5f6-a7b8-9012-cdef-012345678901" + format: uuid + type: string + required: + - org_uuid + - org_site + type: object + OrgGroupPolicyOverrideUpdateData: + description: Data for updating a policy override. properties: attributes: - $ref: "#/components/schemas/PatchComponentRequestDataAttributes" + $ref: "#/components/schemas/OrgGroupPolicyOverrideUpdateAttributes" id: - description: The ID of the component. - example: "1234abcd-12ab-34cd-56ef-123456abcdef" + description: The ID of the policy override. + example: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" format: uuid type: string type: - $ref: "#/components/schemas/StatusPagesComponentGroupType" + $ref: "#/components/schemas/OrgGroupPolicyOverrideType" required: - - attributes - id - type + - attributes type: object - PatchComponentRequestDataAttributes: - description: The supported attributes for updating a component. + OrgGroupPolicyOverrideUpdateRequest: + description: Request to update an org group policy override. 
properties: - name: - description: The name of the component. - example: Web App - type: string - position: - description: The position of the component. If the component belongs to a group, the position is relative to the other components in the group. - example: 1 - format: int64 - type: integer - type: object - PatchDegradationRequest: - description: Request object for updating a degradation. - example: data: - attributes: - components_affected: - - id: 1234abcd-12ab-34cd-56ef-123456abcdef - status: operational - description: We've deployed a fix and latency has returned to normal. This issue has been resolved. - status: resolved - id: 1234abcd-12ab-34cd-56ef-123456abcdef - type: degradations + $ref: "#/components/schemas/OrgGroupPolicyOverrideUpdateData" + required: + - data + type: object + OrgGroupPolicyRelationshipToOne: + description: Relationship to a single org group policy. properties: data: - $ref: "#/components/schemas/PatchDegradationRequestData" + $ref: "#/components/schemas/OrgGroupPolicyRelationshipToOneData" + required: + - data type: object - PatchDegradationRequestData: - description: The data object for updating a degradation. + OrgGroupPolicyRelationshipToOneData: + description: A reference to an org group policy. properties: - attributes: - $ref: "#/components/schemas/PatchDegradationRequestDataAttributes" id: - description: The ID of the degradation. - example: "1234abcd-12ab-34cd-56ef-123456abcdef" + description: The ID of the policy. + example: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" format: uuid type: string type: - $ref: "#/components/schemas/PatchDegradationRequestDataType" + $ref: "#/components/schemas/OrgGroupPolicyType" required: - - attributes - id - type type: object - PatchDegradationRequestDataAttributes: - description: The supported attributes for updating a degradation. + OrgGroupPolicyRelationships: + description: Relationships of an org group policy. 
properties: - components_affected: - description: The components affected by the degradation. - example: - - id: 1234abcd-12ab-34cd-56ef-123456abcdef - status: operational - items: - $ref: "#/components/schemas/PatchDegradationRequestDataAttributesComponentsAffectedItems" - type: array - description: - description: The description of the degradation. - example: We've deployed a fix and latency has returned to normal. This issue has been resolved. - type: string - status: - $ref: "#/components/schemas/PatchDegradationRequestDataAttributesStatus" - example: resolved - title: - description: The title of the degradation. - example: Elevated API Latency - type: string + org_group: + $ref: "#/components/schemas/OrgGroupRelationshipToOne" type: object - PatchDegradationRequestDataAttributesComponentsAffectedItems: - description: A component affected by a degradation. + OrgGroupPolicyResponse: + description: Response containing a single org group policy. properties: - id: - description: The ID of the component. Must be a component of type `component`. - example: "1234abcd-12ab-34cd-56ef-123456abcdef" - format: uuid - type: string - name: - description: The name of the component. - readOnly: true - type: string - status: - $ref: "#/components/schemas/StatusPagesComponentDataAttributesStatus" + data: + $ref: "#/components/schemas/OrgGroupPolicyData" required: - - id - - status + - data type: object - PatchDegradationRequestDataAttributesStatus: - description: The status of the degradation. + OrgGroupPolicySortOption: + default: id + description: Field to sort policies by. enum: - - investigating - - identified - - monitoring - - resolved + - id + - -id + - name + - -name + example: id type: string x-enum-varnames: - - INVESTIGATING - - IDENTIFIED - - MONITORING - - RESOLVED - PatchDegradationRequestDataType: - default: degradations - description: Degradations resource type. 
+ - ID + - MINUS_ID + - NAME + - MINUS_NAME + OrgGroupPolicyType: + description: Org group policies resource type. enum: - - degradations - example: degradations + - org_group_policies + example: org_group_policies type: string x-enum-varnames: - - DEGRADATIONS - PatchIncidentNotificationTemplateRequest: - description: Update request for a notification template. + - ORG_GROUP_POLICIES + OrgGroupPolicyUpdateAttributes: + description: Attributes for updating an org group policy. + properties: + content: + additionalProperties: {} + description: The policy content as key-value pairs. + example: + value: "UTC" + type: object + type: object + OrgGroupPolicyUpdateData: + description: Data for updating an org group policy. + properties: + attributes: + $ref: "#/components/schemas/OrgGroupPolicyUpdateAttributes" + id: + description: The ID of the policy. + example: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + format: uuid + type: string + type: + $ref: "#/components/schemas/OrgGroupPolicyType" + required: + - id + - type + - attributes + type: object + OrgGroupPolicyUpdateRequest: + description: Request to update an org group policy. properties: data: - $ref: "#/components/schemas/IncidentNotificationTemplateUpdateData" + $ref: "#/components/schemas/OrgGroupPolicyUpdateData" required: - data type: object - PatchMaintenanceRequest: - description: Request object for updating a maintenance. - example: - data: - attributes: - completed_date: "2026-02-18T20:01:13.332360075Z" - in_progress_description: We are currently performing maintenance on the API to improve performance for 40 minutes. - scheduled_description: We will be performing maintenance on the API to improve performance for 40 minutes. - start_date: "2026-02-18T19:21:13.332360075Z" - title: API Maintenance - id: 1234abcd-12ab-34cd-56ef-123456abcdef - type: maintenances + OrgGroupRelationshipToOne: + description: Relationship to a single org group. 
properties: data: - $ref: "#/components/schemas/PatchMaintenanceRequestData" + $ref: "#/components/schemas/OrgGroupRelationshipToOneData" + required: + - data type: object - PatchMaintenanceRequestData: - description: The data object for updating a maintenance. + OrgGroupRelationshipToOneData: + description: A reference to an org group. properties: - attributes: - $ref: "#/components/schemas/PatchMaintenanceRequestDataAttributes" id: - description: The ID of the maintenance. - example: "1234abcd-12ab-34cd-56ef-123456abcdef" + description: The ID of the org group. + example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" format: uuid type: string type: - $ref: "#/components/schemas/PatchMaintenanceRequestDataType" + $ref: "#/components/schemas/OrgGroupType" required: - - attributes - - type - id + - type type: object - PatchMaintenanceRequestDataAttributes: - description: The supported attributes for updating a maintenance. + OrgGroupRelationships: + description: Relationships of an org group. properties: - completed_date: - description: Timestamp of when the maintenance was completed. - format: date-time - type: string - completed_description: - description: The description shown when the maintenance is completed. - type: string - components_affected: - description: The components affected by the maintenance. - items: - $ref: "#/components/schemas/PatchMaintenanceRequestDataAttributesComponentsAffectedItems" - type: array - in_progress_description: - description: The description shown while the maintenance is in progress. - type: string - scheduled_description: - description: The description shown when the maintenance is scheduled. - type: string - start_date: - description: Timestamp of when the maintenance is scheduled to start. - format: date-time - type: string - status: - $ref: "#/components/schemas/MaintenanceDataAttributesStatus" - description: The status of the maintenance. - title: - description: The title of the maintenance. 
- type: string + memberships: + $ref: "#/components/schemas/OrgGroupMembershipsRelationship" type: object - PatchMaintenanceRequestDataAttributesComponentsAffectedItems: - description: A component affected by a maintenance. + OrgGroupResponse: + description: Response containing a single org group. properties: - id: - description: The ID of the component. Must be a component of type `component`. - example: "1234abcd-12ab-34cd-56ef-123456abcdef" - format: uuid - type: string - name: - description: The name of the component. - readOnly: true - type: string - status: - $ref: "#/components/schemas/PatchMaintenanceRequestDataAttributesComponentsAffectedItemsStatus" + data: + $ref: "#/components/schemas/OrgGroupData" required: - - id - - status + - data type: object - PatchMaintenanceRequestDataAttributesComponentsAffectedItemsStatus: - description: The status of the component. + OrgGroupSortOption: + default: uuid + description: Field to sort org groups by. enum: - - operational - - maintenance - example: operational + - name + - -name + - uuid + - -uuid + example: name type: string x-enum-varnames: - - OPERATIONAL - - MAINTENANCE - PatchMaintenanceRequestDataType: - default: maintenances - description: Maintenances resource type. + - NAME + - MINUS_NAME + - UUID + - MINUS_UUID + OrgGroupType: + description: Org groups resource type. enum: - - maintenances - example: maintenances + - org_groups + example: org_groups type: string x-enum-varnames: - - MAINTENANCES + - ORG_GROUPS + OrgGroupUpdateAttributes: + description: Attributes for updating an org group. + properties: + name: + description: The name of the org group. + example: "Updated Org Group Name" + type: string + required: + - name + type: object + OrgGroupUpdateData: + description: Data for updating an org group. + properties: + attributes: + $ref: "#/components/schemas/OrgGroupUpdateAttributes" + id: + description: The ID of the org group. 
+ example: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + format: uuid + type: string + type: + $ref: "#/components/schemas/OrgGroupType" + required: + - id + - type + - attributes + type: object + OrgGroupUpdateRequest: + description: Request to update an org group. + properties: + data: + $ref: "#/components/schemas/OrgGroupUpdateData" + required: + - data + type: object + Organization: + description: Organization object. + properties: + attributes: + $ref: "#/components/schemas/OrganizationAttributes" + id: + description: ID of the organization. + type: string + type: + $ref: "#/components/schemas/OrganizationsType" + required: + - type + type: object + OrganizationAttributes: + description: Attributes of the organization. + properties: + created_at: + description: Creation time of the organization. + format: date-time + type: string + description: + description: Description of the organization. + type: string + disabled: + description: Whether or not the organization is disabled. + type: boolean + modified_at: + description: Time of last organization modification. + format: date-time + type: string + name: + description: Name of the organization. + type: string + public_id: + description: Public ID of the organization. + type: string + sharing: + description: Sharing type of the organization. + type: string + url: + description: URL of the site that this organization exists at. + type: string + type: object + OrganizationsType: + default: orgs + description: Organizations resource type. + enum: + - orgs + example: orgs + type: string + x-enum-varnames: + - ORGS + OutboundEdge: + description: The definition of `OutboundEdge` object. + properties: + branchName: + description: The `OutboundEdge` `branchName`. + example: "" + type: string + nextStepName: + description: The `OutboundEdge` `nextStepName`. + example: "" + type: string + required: + - nextStepName + - branchName + type: object + OutcomeType: + default: outcome + description: The JSON:API type for an outcome. 
+ enum: + - outcome + example: outcome + type: string + x-enum-varnames: + - OUTCOME + OutcomesBatchAttributes: + description: The JSON:API attributes for a batched set of scorecard outcomes. + properties: + results: + description: Set of scorecard outcomes to update. + items: + $ref: "#/components/schemas/OutcomesBatchRequestItem" + type: array + type: object + OutcomesBatchRequest: + description: Scorecard outcomes batch request. + properties: + data: + $ref: "#/components/schemas/OutcomesBatchRequestData" + type: object + OutcomesBatchRequestData: + description: Scorecard outcomes batch request data. + properties: + attributes: + $ref: "#/components/schemas/OutcomesBatchAttributes" + type: + $ref: "#/components/schemas/OutcomesBatchType" + type: object + OutcomesBatchRequestItem: + description: Scorecard outcome for a specific rule, for a given service within a batched update. + properties: + remarks: + description: >- + Any remarks regarding the scorecard rule's evaluation, and supports HTML hyperlinks. + example: 'See: Services' + type: string + rule_id: + $ref: "#/components/schemas/RuleId" + service_name: + description: The unique name for a service in the catalog. + example: my-service + type: string + state: + $ref: "#/components/schemas/State" + required: + - rule_id + - service_name + - state + type: object + OutcomesBatchResponse: + description: Scorecard outcomes batch response. + properties: + data: + $ref: "#/components/schemas/OutcomesBatchResponseData" + example: + - attributes: + service_name: my-service + state: pass + id: "outcome-abc123" + type: rule-outcome + meta: + $ref: "#/components/schemas/OutcomesBatchResponseMeta" + required: + - data + - meta + type: object + OutcomesBatchResponseAttributes: + description: The JSON:API attributes for an outcome. + properties: + created_at: + description: Creation time of the rule outcome. + format: date-time + type: string + modified_at: + description: Time of last rule outcome modification. 
+ format: date-time + type: string + remarks: + description: >- + Any remarks regarding the scorecard rule's evaluation, and supports HTML hyperlinks. + example: 'See: Services' + type: string + service_name: + description: The unique name for a service in the catalog. + example: my-service + type: string + state: + $ref: "#/components/schemas/State" + type: object + OutcomesBatchResponseData: + description: List of rule outcomes which were affected during the bulk operation. + items: + $ref: "#/components/schemas/OutcomesResponseDataItem" + type: array + OutcomesBatchResponseMeta: + description: Metadata pertaining to the bulk operation. + properties: + total_received: + description: Total number of scorecard results received during the bulk operation. + format: int64 + type: integer + total_updated: + description: Total number of scorecard results modified during the bulk operation. + format: int64 + type: integer + type: object + OutcomesBatchType: + default: batched-outcome + description: The JSON:API type for scorecard outcomes. + enum: [batched-outcome] + example: batched-outcome + type: string + x-enum-varnames: [BATCHED_OUTCOME] + OutcomesResponse: + description: Scorecard outcomes - the result of a rule for a service. + properties: + data: + $ref: "#/components/schemas/OutcomesResponseData" + included: + $ref: "#/components/schemas/OutcomesResponseIncluded" + links: + $ref: "#/components/schemas/OutcomesResponseLinks" + type: object + OutcomesResponseData: + description: List of rule outcomes. + items: + $ref: "#/components/schemas/OutcomesResponseDataItem" + type: array + OutcomesResponseDataItem: + description: A single rule outcome. + properties: + attributes: + $ref: "#/components/schemas/OutcomesBatchResponseAttributes" + id: + description: The unique ID for a rule outcome. 
+ type: string + relationships: + $ref: "#/components/schemas/RuleOutcomeRelationships" + type: + $ref: "#/components/schemas/OutcomeType" + type: object + OutcomesResponseIncluded: + description: Array of rule details. + items: + $ref: "#/components/schemas/OutcomesResponseIncludedItem" + type: array + OutcomesResponseIncludedItem: + description: Attributes of the included rule. + properties: + attributes: + $ref: "#/components/schemas/OutcomesResponseIncludedRuleAttributes" + id: + $ref: "#/components/schemas/RuleId" + type: + $ref: "#/components/schemas/RuleType" + type: object + OutcomesResponseIncludedRuleAttributes: + description: Details of a rule. + properties: + name: + description: Name of the rule. + example: Team Defined + type: string + scorecard_name: + description: The scorecard name to which this rule must belong. + example: Observability Best Practices + type: string + type: object + OutcomesResponseLinks: + description: Links attributes. + properties: + next: + description: |- + Link for the next set of results. + example: "/api/v2/scorecard/outcomes?include=rule&page%5Blimit%5D=100&page%5Boffset%5D=100" + type: string + type: object + OutputSchema: + description: "A list of output parameters for the workflow." + properties: + parameters: + description: The `OutputSchema` `parameters`. + items: + $ref: "#/components/schemas/OutputSchemaParameters" + type: array + type: object + OutputSchemaParameters: + description: The definition of `OutputSchemaParameters` object. + properties: + defaultValue: + description: The `OutputSchemaParameters` `defaultValue`. + description: + description: The `OutputSchemaParameters` `description`. + type: string + label: + description: The `OutputSchemaParameters` `label`. + type: string + name: + description: The `OutputSchemaParameters` `name`. + example: "" + type: string + type: + $ref: "#/components/schemas/OutputSchemaParametersType" + value: + description: The `OutputSchemaParameters` `value`. 
+ required: + - name + - type + type: object + OutputSchemaParametersType: + description: The definition of `OutputSchemaParametersType` object. + enum: + - STRING + - NUMBER + - BOOLEAN + - OBJECT + - ARRAY_STRING + - ARRAY_NUMBER + - ARRAY_BOOLEAN + - ARRAY_OBJECT + example: STRING + type: string + x-enum-varnames: + - STRING + - NUMBER + - BOOLEAN + - OBJECT + - ARRAY_STRING + - ARRAY_NUMBER + - ARRAY_BOOLEAN + - ARRAY_OBJECT + OverwriteAllocationsRequest: + description: Request to overwrite targeting rules (allocations) for a feature flag in an environment. + properties: + data: + description: Targeting rules (allocations) to replace existing ones with. + items: + $ref: "#/components/schemas/AllocationDataRequest" + type: array + required: + - data + type: object + PageUrgency: + default: high + description: On-Call Page urgency level. + enum: + - low + - high + example: high + type: string + x-enum-varnames: + - LOW + - HIGH + PaginatedResponseMeta: + description: Metadata for scores response. + properties: + count: + description: Number of entities in this response. + example: 10 + format: int64 + type: integer + limit: + description: Pagination limit. + example: 10 + format: int64 + type: integer + offset: + description: Pagination offset. + example: 0 + format: int64 + type: integer + total: + description: Total number of entities available. + example: 150 + format: int64 + type: integer + required: + - count + - total + - limit + - offset + type: object + Pagination: + description: Pagination object. + properties: + total_count: + description: Total count. + format: int64 + type: integer + total_filtered_count: + description: Total count of elements matched by the filter. + format: int64 + type: integer + type: object + PaginationMeta: + description: Response metadata. + properties: + page: + $ref: "#/components/schemas/PaginationMetaPage" + readOnly: true + type: object + PaginationMetaPage: + description: Offset-based pagination schema. 
+ example: + first_offset: 0 + last_offset: 900 + limit: 100 + next_offset: 100 + offset: 0 + prev_offset: 100 + total: 1000 + type: offset_limit + properties: + first_offset: + description: Integer representing the offset to fetch the first page of results. + example: 0 + format: int64 + type: integer + last_offset: + description: Integer representing the offset to fetch the last page of results. + example: 900 + format: int64 + nullable: true + type: integer + limit: + description: Integer representing the number of elements to be returned in the results. + example: 100 + format: int64 + type: integer + next_offset: + description: >- + Integer representing the index of the first element in the next page of results. Equal to page size added to the current offset. + example: 100 + format: int64 + nullable: true + type: integer + offset: + description: Integer representing the index of the first element in the results. + example: 0 + format: int64 + type: integer + prev_offset: + description: Integer representing the index of the first element in the previous page of results. + example: 100 + format: int64 + nullable: true + type: integer + total: + description: Integer representing the total number of elements available. + example: 1000 + format: int64 + nullable: true + type: integer + type: + $ref: "#/components/schemas/PaginationMetaPageType" + type: object + PaginationMetaPageType: + default: offset_limit + description: The pagination type used for offset-based pagination. + enum: + - offset_limit + example: offset_limit + type: string + x-enum-varnames: + - OFFSET_LIMIT + Parameter: + description: The definition of `Parameter` object. + properties: + name: + description: The `Parameter` `name`. + example: "" + type: string + value: + description: The `Parameter` `value`. + required: + - name + - value + type: object + PartialAPIKey: + description: Partial Datadog API key. 
+ properties: + attributes: + $ref: "#/components/schemas/PartialAPIKeyAttributes" + id: + description: ID of the API key. + type: string + relationships: + $ref: "#/components/schemas/APIKeyRelationships" + type: + $ref: "#/components/schemas/APIKeysType" + type: object + PartialAPIKeyAttributes: + description: Attributes of a partial API key. + properties: + category: + description: The category of the API key. + type: string + created_at: + description: Creation date of the API key. + example: "2020-11-23T10:00:00.000Z" + readOnly: true + type: string + date_last_used: + description: Date the API Key was last used. + example: "2020-11-27T10:00:00.000Z" + format: date-time + nullable: true + readOnly: true + type: string + last4: + description: The last four characters of the API key. + example: "abcd" + maxLength: 4 + minLength: 4 + readOnly: true + type: string + modified_at: + description: Date the API key was last modified. + example: "2020-11-23T10:00:00.000Z" + readOnly: true + type: string + name: + description: Name of the API key. + example: "API Key for submitting metrics" + type: string + remote_config_read_enabled: + description: The remote config read enabled status. + type: boolean + type: object + PartialApplicationKey: + description: Partial Datadog application key. + properties: + attributes: + $ref: "#/components/schemas/PartialApplicationKeyAttributes" + id: + description: ID of the application key. + type: string + relationships: + $ref: "#/components/schemas/ApplicationKeyRelationships" + type: + $ref: "#/components/schemas/ApplicationKeysType" + type: object + PartialApplicationKeyAttributes: + description: Attributes of a partial application key. + properties: + created_at: + description: Creation date of the application key. + example: "2020-11-23T10:00:00.000Z" + readOnly: true + type: string + last4: + description: The last four characters of the application key. 
+ example: "abcd" + maxLength: 4 + minLength: 4 + readOnly: true + type: string + last_used_at: + description: Last usage timestamp of the application key. + example: "2020-12-20T10:00:00.000Z" + nullable: true + readOnly: true + type: string + name: + description: Name of the application key. + example: "Application Key for managing dashboards" + type: string + scopes: + description: Array of scopes to grant the application key. + example: ["dashboards_read", "dashboards_write", "dashboards_public_share"] + items: + description: Name of scope. + type: string + nullable: true + type: array + type: object + PartialApplicationKeyResponse: + description: Response for retrieving a partial application key. + properties: + data: + $ref: "#/components/schemas/PartialApplicationKey" + included: + description: Array of objects related to the application key. + items: + $ref: "#/components/schemas/ApplicationKeyResponseIncludedItem" + type: array + type: object + PatchAttachmentRequest: + description: Request to update an attachment. + properties: + data: + $ref: "#/components/schemas/PatchAttachmentRequestData" + type: object + PatchAttachmentRequestData: + description: Attachment data for an update request. + properties: + attributes: + $ref: "#/components/schemas/PatchAttachmentRequestDataAttributes" + id: + description: The unique identifier of the attachment. + example: "00000000-abcd-0002-0000-000000000000" + type: string + type: + $ref: "#/components/schemas/IncidentAttachmentType" + required: + - type + type: object + PatchAttachmentRequestDataAttributes: + description: The attributes for updating an attachment. + properties: + attachment: + $ref: "#/components/schemas/PatchAttachmentRequestDataAttributesAttachment" + type: object + PatchAttachmentRequestDataAttributesAttachment: + description: The updated attachment object. + properties: + documentUrl: + description: The updated URL for the attachment. 
+ example: https://app.datadoghq.com/notebook/124/Postmortem-IR-124 + type: string + title: + description: The updated title for the attachment. + example: Postmortem-IR-124 + type: string + type: object + PatchComponentRequest: + description: Request object for updating a component. + example: + data: + attributes: + name: Metrics Intake Service + position: 4 + id: 1234abcd-12ab-34cd-56ef-123456abcdef + type: components + properties: + data: + $ref: "#/components/schemas/PatchComponentRequestData" + type: object + PatchComponentRequestData: + description: The data object for updating a component. + properties: + attributes: + $ref: "#/components/schemas/PatchComponentRequestDataAttributes" + id: + description: The ID of the component. + example: "1234abcd-12ab-34cd-56ef-123456abcdef" + format: uuid + type: string + type: + $ref: "#/components/schemas/StatusPagesComponentGroupType" + required: + - attributes + - id + - type + type: object + PatchComponentRequestDataAttributes: + description: The supported attributes for updating a component. + properties: + name: + description: The name of the component. + example: Web App + type: string + position: + description: The position of the component. If the component belongs to a group, the position is relative to the other components in the group. + example: 1 + format: int64 + type: integer + type: object + PatchDegradationRequest: + description: Request object for updating a degradation. + example: + data: + attributes: + components_affected: + - id: 1234abcd-12ab-34cd-56ef-123456abcdef + status: operational + description: We've deployed a fix and latency has returned to normal. This issue has been resolved. + status: resolved + id: 1234abcd-12ab-34cd-56ef-123456abcdef + type: degradations + properties: + data: + $ref: "#/components/schemas/PatchDegradationRequestData" + type: object + PatchDegradationRequestData: + description: The data object for updating a degradation. 
+ properties: + attributes: + $ref: "#/components/schemas/PatchDegradationRequestDataAttributes" + id: + description: The ID of the degradation. + example: "1234abcd-12ab-34cd-56ef-123456abcdef" + format: uuid + type: string + type: + $ref: "#/components/schemas/PatchDegradationRequestDataType" + required: + - attributes + - id + - type + type: object + PatchDegradationRequestDataAttributes: + description: The supported attributes for updating a degradation. + properties: + components_affected: + description: The components affected by the degradation. + example: + - id: 1234abcd-12ab-34cd-56ef-123456abcdef + status: operational + items: + $ref: "#/components/schemas/PatchDegradationRequestDataAttributesComponentsAffectedItems" + type: array + description: + description: The description of the degradation. + example: We've deployed a fix and latency has returned to normal. This issue has been resolved. + type: string + status: + $ref: "#/components/schemas/PatchDegradationRequestDataAttributesStatus" + example: resolved + title: + description: The title of the degradation. + example: Elevated API Latency + type: string + type: object + PatchDegradationRequestDataAttributesComponentsAffectedItems: + description: A component affected by a degradation. + properties: + id: + description: The ID of the component. Must be a component of type `component`. + example: "1234abcd-12ab-34cd-56ef-123456abcdef" + format: uuid + type: string + name: + description: The name of the component. + readOnly: true + type: string + status: + $ref: "#/components/schemas/StatusPagesComponentDataAttributesStatus" + required: + - id + - status + type: object + PatchDegradationRequestDataAttributesStatus: + description: The status of the degradation. 
+ enum: + - investigating + - identified + - monitoring + - resolved + type: string + x-enum-varnames: + - INVESTIGATING + - IDENTIFIED + - MONITORING + - RESOLVED + PatchDegradationRequestDataType: + default: degradations + description: Degradations resource type. + enum: + - degradations + example: degradations + type: string + x-enum-varnames: + - DEGRADATIONS + PatchIncidentNotificationTemplateRequest: + description: Update request for a notification template. + properties: + data: + $ref: "#/components/schemas/IncidentNotificationTemplateUpdateData" + required: + - data + type: object + PatchMaintenanceRequest: + description: Request object for updating a maintenance. + example: + data: + attributes: + completed_date: "2026-02-18T20:01:13.332360075Z" + in_progress_description: We are currently performing maintenance on the API to improve performance for 40 minutes. + scheduled_description: We will be performing maintenance on the API to improve performance for 40 minutes. + start_date: "2026-02-18T19:21:13.332360075Z" + title: API Maintenance + id: 1234abcd-12ab-34cd-56ef-123456abcdef + type: maintenances + properties: + data: + $ref: "#/components/schemas/PatchMaintenanceRequestData" + type: object + PatchMaintenanceRequestData: + description: The data object for updating a maintenance. + properties: + attributes: + $ref: "#/components/schemas/PatchMaintenanceRequestDataAttributes" + id: + description: The ID of the maintenance. + example: "1234abcd-12ab-34cd-56ef-123456abcdef" + format: uuid + type: string + type: + $ref: "#/components/schemas/PatchMaintenanceRequestDataType" + required: + - attributes + - type + - id + type: object + PatchMaintenanceRequestDataAttributes: + description: The supported attributes for updating a maintenance. + properties: + completed_date: + description: Timestamp of when the maintenance was completed. 
+ format: date-time + type: string + completed_description: + description: The description shown when the maintenance is completed. + type: string + components_affected: + description: The components affected by the maintenance. + items: + $ref: "#/components/schemas/PatchMaintenanceRequestDataAttributesComponentsAffectedItems" + type: array + in_progress_description: + description: The description shown while the maintenance is in progress. + type: string + scheduled_description: + description: The description shown when the maintenance is scheduled. + type: string + start_date: + description: Timestamp of when the maintenance is scheduled to start. + format: date-time + type: string + status: + $ref: "#/components/schemas/MaintenanceDataAttributesStatus" + description: The status of the maintenance. + title: + description: The title of the maintenance. + type: string + type: object + PatchMaintenanceRequestDataAttributesComponentsAffectedItems: + description: A component affected by a maintenance. + properties: + id: + description: The ID of the component. Must be a component of type `component`. + example: "1234abcd-12ab-34cd-56ef-123456abcdef" + format: uuid + type: string + name: + description: The name of the component. + readOnly: true + type: string + status: + $ref: "#/components/schemas/PatchMaintenanceRequestDataAttributesComponentsAffectedItemsStatus" + required: + - id + - status + type: object + PatchMaintenanceRequestDataAttributesComponentsAffectedItemsStatus: + description: The status of the component. + enum: + - operational + - maintenance + example: operational + type: string + x-enum-varnames: + - OPERATIONAL + - MAINTENANCE + PatchMaintenanceRequestDataType: + default: maintenances + description: Maintenances resource type. + enum: + - maintenances + example: maintenances + type: string + x-enum-varnames: + - MAINTENANCES PatchNotificationRuleParameters: description: Body of the notification rule patch request. 
properties: @@ -98085,421 +99151,1012 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/ListInterfaceTagsResponse" + $ref: "#/components/schemas/ListInterfaceTagsResponse" + description: OK + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: List tags for an interface + tags: + - Network Device Monitoring + x-menu-order: 6 + patch: + description: Updates the tags associated with the specified interface. + operationId: UpdateInterfaceUserTags + parameters: + - description: The ID of the interface for which to update tags. + example: example:1.2.3.4:1 + in: path + name: interface_id + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ListInterfaceTagsResponse" + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/ListInterfaceTagsResponse" + description: OK + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Update the tags for an interface + tags: + - Network Device Monitoring + x-menu-order: 7 + /api/v2/network/connections/aggregate: + get: + description: Get all aggregated connections. + operationId: GetAggregatedConnections + parameters: + - description: Unix timestamp (number of seconds since epoch) of the start of the query window. If not provided, the start of the query window is 15 minutes before the `to` timestamp. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. + in: query + name: from + schema: + format: int64 + type: integer + - description: Unix timestamp (number of seconds since epoch) of the end of the query window. If not provided, the end of the query window is the current time. 
If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. + in: query + name: to + schema: + format: int64 + type: integer + - description: Comma-separated list of fields to group connections by. The maximum number of group_by(s) is 10. + in: query + name: group_by + schema: + type: string + - description: Comma-separated list of tags to filter connections by. + in: query + name: tags + schema: + type: string + - description: The number of connections to be returned. The maximum value is 7500. The default is 100. + in: query + name: limit + schema: + default: 100 + format: int32 + maximum: 7500 + minimum: 1 + type: integer + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/SingleAggregatedConnectionResponseArray" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Get all aggregated connections + tags: + - Cloud Network Monitoring + x-menu-order: 1 + /api/v2/network/dns/aggregate: + get: + description: Get all aggregated DNS traffic. + operationId: GetAggregatedDns + parameters: + - description: Unix timestamp (number of seconds since epoch) of the start of the query window. If not provided, the start of the query window is 15 minutes before the `to` timestamp. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. + in: query + name: from + schema: + format: int64 + type: integer + - description: Unix timestamp (number of seconds since epoch) of the end of the query window. If not provided, the end of the query window is the current time. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. + in: query + name: to + schema: + format: int64 + type: integer + - description: Comma-separated list of fields to group DNS traffic by. The server side defaults to `network.dns_query` if unspecified. `server_ungrouped` may be used if groups are not desired. 
The maximum number of group_by(s) is 10. + in: query + name: group_by + schema: + type: string + - description: Comma-separated list of tags to filter DNS traffic by. + in: query + name: tags + schema: + type: string + - description: The number of aggregated DNS entries to be returned. The maximum value is 7500. The default is 100. + in: query + name: limit + schema: + default: 100 + format: int32 + maximum: 7500 + minimum: 1 + type: integer + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/SingleAggregatedDnsResponseArray" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Get all aggregated DNS traffic + tags: + - Cloud Network Monitoring + x-menu-order: 2 + /api/v2/obs-pipelines/pipelines: + get: + description: Retrieve a list of pipelines. + operationId: ListPipelines + parameters: + - $ref: "#/components/parameters/PageSize" + - $ref: "#/components/parameters/PageNumber" + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/ListPipelinesResponse" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "403": + $ref: "#/components/responses/NotAuthorizedResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: List pipelines + tags: + - Observability Pipelines + x-menu-order: 1 + "x-permission": + operator: OR + permissions: + - observability_pipelines_read + post: + description: Create a new pipeline. 
+ operationId: CreatePipeline + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ObservabilityPipelineSpec" + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/ObservabilityPipeline" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "403": + $ref: "#/components/responses/NotAuthorizedResponse" + "409": + $ref: "#/components/responses/ConflictResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Create a new pipeline + tags: + - Observability Pipelines + x-menu-order: 2 + "x-permission": + operator: OR + permissions: + - observability_pipelines_deploy + /api/v2/obs-pipelines/pipelines/validate: + post: + description: |- + Validates a pipeline configuration without creating or updating any resources. + Returns a list of validation errors, if any. + operationId: ValidatePipeline + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ObservabilityPipelineSpec" + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/ValidationResponse" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "403": + $ref: "#/components/responses/NotAuthorizedResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Validate an observability pipeline + tags: + - Observability Pipelines + x-menu-order: 6 + "x-permission": + operator: OR + permissions: + - observability_pipelines_read + /api/v2/obs-pipelines/pipelines/{pipeline_id}: + delete: + description: Delete a pipeline. + operationId: DeletePipeline + parameters: + - description: The ID of the pipeline to delete. 
+ in: path + name: pipeline_id + required: true + schema: + type: string + responses: + "204": + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/APIErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/APIErrorResponse" + description: Not Found + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/APIErrorResponse" + description: Conflict + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Delete a pipeline + tags: + - Observability Pipelines + x-menu-order: 5 + "x-permission": + operator: OR + permissions: + - observability_pipelines_delete + get: + description: Get a specific pipeline by its ID. + operationId: GetPipeline + parameters: + - description: The ID of the pipeline to retrieve. + in: path + name: pipeline_id + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/ObservabilityPipeline" + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/APIErrorResponse" + description: Forbidden + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Get a specific pipeline + tags: + - Observability Pipelines + x-menu-order: 3 + "x-permission": + operator: OR + permissions: + - observability_pipelines_read + put: + description: Update a pipeline. + operationId: UpdatePipeline + parameters: + - description: The ID of the pipeline to update. 
+ in: path + name: pipeline_id + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ObservabilityPipeline" + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/ObservabilityPipeline" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "403": + $ref: "#/components/responses/NotAuthorizedResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" + "409": + $ref: "#/components/responses/ConflictResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Update a pipeline + tags: + - Observability Pipelines + x-menu-order: 4 + "x-permission": + operator: OR + permissions: + - observability_pipelines_deploy + /api/v2/on-call/escalation-policies: + post: + description: Create a new On-Call escalation policy + operationId: CreateOnCallEscalationPolicy + parameters: + - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `steps`, `steps.targets`." 
+ in: query + name: include + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/EscalationPolicyCreateRequest" + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/EscalationPolicy" + description: Created + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Create On-Call escalation policy + tags: + - On-Call + x-menu-order: 5 + "x-permission": + operator: AND + permissions: + - on_call_write + /api/v2/on-call/escalation-policies/{policy_id}: + delete: + description: Delete an On-Call escalation policy + operationId: DeleteOnCallEscalationPolicy + parameters: + - description: The ID of the escalation policy + in: path + name: policy_id + required: true + schema: + example: a3000000-0000-0000-0000-000000000000 + type: string + responses: + "204": + description: No Content + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Delete On-Call escalation policy + tags: + - On-Call + x-menu-order: 8 + "x-permission": + operator: AND + permissions: + - on_call_write + get: + description: Get an On-Call escalation policy + operationId: GetOnCallEscalationPolicy + parameters: + - description: The ID of the escalation policy + in: path + name: policy_id + required: true + schema: + example: a3000000-0000-0000-0000-000000000000 + type: string + - description: "Comma-separated list of included relationships to be returned. 
Allowed values: `teams`, `steps`, `steps.targets`." + in: query + name: include + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/EscalationPolicy" description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" "403": $ref: "#/components/responses/ForbiddenResponse" "404": $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: List tags for an interface + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Get On-Call escalation policy tags: - - Network Device Monitoring - x-menu-order: 6 - patch: - description: Updates the tags associated with the specified interface. - operationId: UpdateInterfaceUserTags + - On-Call + x-menu-order: 7 + "x-permission": + operator: AND + permissions: + - on_call_read + put: + description: Update an On-Call escalation policy + operationId: UpdateOnCallEscalationPolicy parameters: - - description: The ID of the interface for which to update tags. - example: example:1.2.3.4:1 + - description: The ID of the escalation policy in: path - name: interface_id + name: policy_id required: true + schema: + example: a3000000-0000-0000-0000-000000000000 + type: string + - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `steps`, `steps.targets`." 
+ in: query + name: include schema: type: string requestBody: content: application/json: schema: - $ref: "#/components/schemas/ListInterfaceTagsResponse" + $ref: "#/components/schemas/EscalationPolicyUpdateRequest" required: true responses: "200": content: application/json: schema: - $ref: "#/components/schemas/ListInterfaceTagsResponse" + $ref: "#/components/schemas/EscalationPolicy" description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" "403": $ref: "#/components/responses/ForbiddenResponse" "404": $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Update the tags for an interface + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Update On-Call escalation policy tags: - - Network Device Monitoring - x-menu-order: 7 - /api/v2/network/connections/aggregate: - get: - description: Get all aggregated connections. - operationId: GetAggregatedConnections - parameters: - - description: Unix timestamp (number of seconds since epoch) of the start of the query window. If not provided, the start of the query window is 15 minutes before the `to` timestamp. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. - in: query - name: from - schema: - format: int64 - type: integer - - description: Unix timestamp (number of seconds since epoch) of the end of the query window. If not provided, the end of the query window is the current time. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. - in: query - name: to - schema: - format: int64 - type: integer - - description: Comma-separated list of fields to group connections by. The maximum number of group_by(s) is 10. - in: query - name: group_by - schema: - type: string - - description: Comma-separated list of tags to filter connections by. 
- in: query - name: tags - schema: - type: string - - description: The number of connections to be returned. The maximum value is 7500. The default is 100. - in: query - name: limit - schema: - default: 100 - format: int32 - maximum: 7500 - minimum: 1 - type: integer + - On-Call + x-menu-order: 6 + "x-permission": + operator: AND + permissions: + - on_call_write + /api/v2/on-call/pages: + post: + description: |- + Trigger a new On-Call Page. + operationId: CreateOnCallPage + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreatePageRequest" + required: true responses: "200": content: application/json: schema: - $ref: "#/components/schemas/SingleAggregatedConnectionResponseArray" - description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" + $ref: "#/components/schemas/CreatePageResponse" + description: OK. "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Get all aggregated connections + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + servers: + - url: https://{site} + variables: + site: + default: navy.oncall.datadoghq.com + description: The globally available endpoint for On-Call. + enum: + - lava.oncall.datadoghq.com + - saffron.oncall.datadoghq.com + - navy.oncall.datadoghq.com + - coral.oncall.datadoghq.com + - teal.oncall.datadoghq.com + - beige.oncall.datadoghq.eu + - url: "{protocol}://{name}" + variables: + name: + default: api.datadoghq.com + description: Full site DNS name. + protocol: + default: https + description: The protocol for accessing the API. + - url: https://{subdomain}.{site} + variables: + site: + default: datadoghq.com + description: Any Datadog deployment. + subdomain: + default: api + description: The subdomain where the API is deployed. + summary: Create On-Call Page tags: - - Cloud Network Monitoring + - On-Call Paging x-menu-order: 1 - /api/v2/network/dns/aggregate: - get: - description: Get all aggregated DNS traffic. 
- operationId: GetAggregatedDns + /api/v2/on-call/pages/{page_id}/acknowledge: + post: + description: |- + Acknowledges an On-Call Page. + operationId: AcknowledgeOnCallPage parameters: - - description: Unix timestamp (number of seconds since epoch) of the start of the query window. If not provided, the start of the query window is 15 minutes before the `to` timestamp. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. - in: query - name: from - schema: - format: int64 - type: integer - - description: Unix timestamp (number of seconds since epoch) of the end of the query window. If not provided, the end of the query window is the current time. If neither `from` nor `to` are provided, the query window is `[now - 15m, now]`. - in: query - name: to - schema: - format: int64 - type: integer - - description: Comma-separated list of fields to group DNS traffic by. The server side defaults to `network.dns_query` if unspecified. `server_ungrouped` may be used if groups are not desired. The maximum number of group_by(s) is 10. - in: query - name: group_by - schema: - type: string - - description: Comma-separated list of tags to filter DNS traffic by. - in: query - name: tags + - description: The page ID. + in: path + name: page_id + required: true schema: + example: 15e74b8b-f865-48d0-bcc5-453323ed2c8f + format: uuid type: string - - description: The number of aggregated DNS entries to be returned. The maximum value is 7500. The default is 100. - in: query - name: limit - schema: - default: 100 - format: int32 - maximum: 7500 - minimum: 1 - type: integer responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/SingleAggregatedDnsResponseArray" - description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" + "202": + description: Accepted. 
"429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Get all aggregated DNS traffic + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + servers: + - url: https://{site} + variables: + site: + default: navy.oncall.datadoghq.com + description: The globally available endpoint for On-Call. + enum: + - lava.oncall.datadoghq.com + - saffron.oncall.datadoghq.com + - navy.oncall.datadoghq.com + - coral.oncall.datadoghq.com + - teal.oncall.datadoghq.com + - beige.oncall.datadoghq.eu + - url: "{protocol}://{name}" + variables: + name: + default: api.datadoghq.com + description: Full site DNS name. + protocol: + default: https + description: The protocol for accessing the API. + - url: https://{subdomain}.{site} + variables: + site: + default: datadoghq.com + description: Any Datadog deployment. + subdomain: + default: api + description: The subdomain where the API is deployed. + summary: Acknowledge On-Call Page tags: - - Cloud Network Monitoring + - On-Call Paging x-menu-order: 2 - /api/v2/obs-pipelines/pipelines: - get: - description: Retrieve a list of pipelines. - operationId: ListPipelines + /api/v2/on-call/pages/{page_id}/escalate: + post: + description: |- + Escalates an On-Call Page. + operationId: EscalateOnCallPage parameters: - - $ref: "#/components/parameters/PageSize" - - $ref: "#/components/parameters/PageNumber" + - description: The page ID. + in: path + name: page_id + required: true + schema: + example: 15e74b8b-f865-48d0-bcc5-453323ed2c8f + format: uuid + type: string responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/ListPipelinesResponse" - description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" - "403": - $ref: "#/components/responses/NotAuthorizedResponse" + "202": + description: Accepted. 
"429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: List pipelines + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + servers: + - url: https://{site} + variables: + site: + default: navy.oncall.datadoghq.com + description: The globally available endpoint for On-Call. + enum: + - lava.oncall.datadoghq.com + - saffron.oncall.datadoghq.com + - navy.oncall.datadoghq.com + - coral.oncall.datadoghq.com + - teal.oncall.datadoghq.com + - beige.oncall.datadoghq.eu + - url: "{protocol}://{name}" + variables: + name: + default: api.datadoghq.com + description: Full site DNS name. + protocol: + default: https + description: The protocol for accessing the API. + - url: https://{subdomain}.{site} + variables: + site: + default: datadoghq.com + description: Any Datadog deployment. + subdomain: + default: api + description: The subdomain where the API is deployed. + summary: Escalate On-Call Page tags: - - Observability Pipelines - x-menu-order: 1 - "x-permission": - operator: OR - permissions: - - observability_pipelines_read + - On-Call Paging + x-menu-order: 3 + /api/v2/on-call/pages/{page_id}/resolve: post: - description: Create a new pipeline. - operationId: CreatePipeline - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/ObservabilityPipelineSpec" - required: true + description: |- + Resolves an On-Call Page. + operationId: ResolveOnCallPage + parameters: + - description: The page ID. + in: path + name: page_id + required: true + schema: + example: 15e74b8b-f865-48d0-bcc5-453323ed2c8f + format: uuid + type: string responses: - "201": - content: - application/json: - schema: - $ref: "#/components/schemas/ObservabilityPipeline" - description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" - "403": - $ref: "#/components/responses/NotAuthorizedResponse" - "409": - $ref: "#/components/responses/ConflictResponse" + "202": + description: Accepted. 
"429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Create a new pipeline + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + servers: + - url: https://{site} + variables: + site: + default: navy.oncall.datadoghq.com + description: The globally available endpoint for On-Call. + enum: + - lava.oncall.datadoghq.com + - saffron.oncall.datadoghq.com + - navy.oncall.datadoghq.com + - coral.oncall.datadoghq.com + - teal.oncall.datadoghq.com + - beige.oncall.datadoghq.eu + - url: "{protocol}://{name}" + variables: + name: + default: api.datadoghq.com + description: Full site DNS name. + protocol: + default: https + description: The protocol for accessing the API. + - url: https://{subdomain}.{site} + variables: + site: + default: datadoghq.com + description: Any Datadog deployment. + subdomain: + default: api + description: The subdomain where the API is deployed. + summary: Resolve On-Call Page tags: - - Observability Pipelines - x-menu-order: 2 - "x-permission": - operator: OR - permissions: - - observability_pipelines_deploy - /api/v2/obs-pipelines/pipelines/validate: + - On-Call Paging + x-menu-order: 4 + /api/v2/on-call/schedules: post: - description: |- - Validates a pipeline configuration without creating or updating any resources. - Returns a list of validation errors, if any. - operationId: ValidatePipeline + description: Create a new On-Call schedule + operationId: CreateOnCallSchedule + parameters: + - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `layers`, `layers.members`, `layers.members.user`." 
+ in: query + name: include + schema: + type: string requestBody: content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/ObservabilityPipelineSpec" + $ref: "#/components/schemas/ScheduleCreateRequest" required: true responses: - "200": + "201": content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/ValidationResponse" - description: OK + $ref: "#/components/schemas/Schedule" + description: Created "400": $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" "403": - $ref: "#/components/responses/NotAuthorizedResponse" + $ref: "#/components/responses/ForbiddenResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Validate an observability pipeline + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Create On-Call schedule tags: - - Observability Pipelines - x-menu-order: 6 + - On-Call + x-menu-order: 1 "x-permission": - operator: OR + operator: AND permissions: - - observability_pipelines_read - /api/v2/obs-pipelines/pipelines/{pipeline_id}: + - on_call_write + /api/v2/on-call/schedules/{schedule_id}: delete: - description: Delete a pipeline. - operationId: DeletePipeline + description: Delete an On-Call schedule + operationId: DeleteOnCallSchedule parameters: - - description: The ID of the pipeline to delete. 
+ - description: The ID of the schedule in: path - name: pipeline_id + name: schedule_id required: true schema: + example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d type: string responses: "204": - description: OK + description: No Content + "401": + $ref: "#/components/responses/UnauthorizedResponse" "403": - content: - application/json: - schema: - $ref: "#/components/schemas/APIErrorResponse" - description: Forbidden + $ref: "#/components/responses/ForbiddenResponse" "404": - content: - application/json: - schema: - $ref: "#/components/schemas/APIErrorResponse" - description: Not Found - "409": - content: - application/json: - schema: - $ref: "#/components/schemas/APIErrorResponse" - description: Conflict + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Delete a pipeline + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Delete On-Call schedule tags: - - Observability Pipelines - x-menu-order: 5 + - On-Call + x-menu-order: 3 "x-permission": - operator: OR + operator: AND permissions: - - observability_pipelines_delete + - on_call_write get: - description: Get a specific pipeline by its ID. - operationId: GetPipeline + description: Get an On-Call schedule + operationId: GetOnCallSchedule parameters: - - description: The ID of the pipeline to retrieve. + - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `layers`, `layers.members`, `layers.members.user`." 
+ in: query + name: include + schema: + type: string + - description: The ID of the schedule in: path - name: pipeline_id + name: schedule_id required: true schema: + example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d type: string responses: "200": content: - application/json: - schema: - $ref: "#/components/schemas/ObservabilityPipeline" - description: OK - "403": - content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/APIErrorResponse" - description: Forbidden + $ref: "#/components/schemas/Schedule" + description: OK + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Get a specific pipeline + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Get On-Call schedule tags: - - Observability Pipelines - x-menu-order: 3 + - On-Call + x-menu-order: 2 "x-permission": - operator: OR + operator: AND permissions: - - observability_pipelines_read + - on_call_read put: - description: Update a pipeline. - operationId: UpdatePipeline + description: Update a new On-Call schedule + operationId: UpdateOnCallSchedule parameters: - - description: The ID of the pipeline to update. + - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `layers`, `layers.members`, `layers.members.user`." 
+ in: query + name: include + schema: + type: string + - description: The ID of the schedule in: path - name: pipeline_id + name: schedule_id required: true schema: + example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d type: string requestBody: content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/ObservabilityPipeline" + $ref: "#/components/schemas/ScheduleUpdateRequest" required: true responses: "200": content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/ObservabilityPipeline" + $ref: "#/components/schemas/Schedule" description: OK "400": $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" "403": - $ref: "#/components/responses/NotAuthorizedResponse" + $ref: "#/components/responses/ForbiddenResponse" "404": $ref: "#/components/responses/NotFoundResponse" - "409": - $ref: "#/components/responses/ConflictResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" - summary: Update a pipeline + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Update On-Call schedule tags: - - Observability Pipelines + - On-Call x-menu-order: 4 "x-permission": - operator: OR + operator: AND permissions: - - observability_pipelines_deploy - /api/v2/on-call/escalation-policies: - post: - description: Create a new On-Call escalation policy - operationId: CreateOnCallEscalationPolicy + - on_call_write + /api/v2/on-call/schedules/{schedule_id}/on-call: + get: + description: "Retrieves the user who is on-call for the specified schedule at a given time." + operationId: GetScheduleOnCallUser parameters: - - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `steps`, `steps.targets`." + - description: "Specifies related resources to include in the response as a comma-separated list. Allowed value: `user`." 
in: query name: include schema: type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/EscalationPolicyCreateRequest" - required: true + - description: The ID of the schedule. + in: path + name: schedule_id + required: true + schema: + example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d + type: string + - description: Retrieves the on-call user at the given timestamp in RFC3339 format (for example, `2025-05-07T02:53:01Z` or `2025-05-07T02:53:01+00:00`). When using timezone offsets with `+` or `-`, ensure proper URL encoding (`+` should be encoded as `%2B`). Defaults to the current time if omitted. + in: query + name: filter[at_ts] + schema: + example: "2025-05-07T02:53:01Z" + type: string responses: - "201": + "200": content: application/json: schema: - $ref: "#/components/schemas/EscalationPolicy" - description: Created + $ref: "#/components/schemas/Shift" + description: OK "400": $ref: "#/components/responses/BadRequestResponse" "401": $ref: "#/components/responses/UnauthorizedResponse" "403": $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Create On-Call escalation policy + summary: Get scheduled on-call user tags: - On-Call - x-menu-order: 5 + x-menu-order: 11 "x-permission": operator: AND permissions: - - on_call_write - /api/v2/on-call/escalation-policies/{policy_id}: - delete: - description: Delete an On-Call escalation policy - operationId: DeleteOnCallEscalationPolicy + - on_call_read + /api/v2/on-call/teams/{team_id}/on-call: + get: + description: Get a team's on-call users at a given time + operationId: GetTeamOnCallUsers parameters: - - description: The ID of the escalation policy + - description: "Comma-separated list of included relationships to be returned. Allowed values: `responders`, `escalations`, `escalations.responders`." 
+ in: query + name: include + schema: + type: string + - description: The team ID in: path - name: policy_id + name: team_id required: true schema: - example: a3000000-0000-0000-0000-000000000000 + example: 27590dae-47be-4a7d-9abf-8f4e45124020 type: string responses: - "204": - description: No Content + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/TeamOnCallResponders" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" "401": $ref: "#/components/responses/UnauthorizedResponse" "403": @@ -98512,26 +100169,27 @@ paths: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Delete On-Call escalation policy + summary: Get team on-call users tags: - On-Call - x-menu-order: 8 + x-menu-order: 12 "x-permission": operator: AND permissions: - - on_call_write + - on_call_read + /api/v2/on-call/teams/{team_id}/routing-rules: get: - description: Get an On-Call escalation policy - operationId: GetOnCallEscalationPolicy + description: Get a team's On-Call routing rules + operationId: GetOnCallTeamRoutingRules parameters: - - description: The ID of the escalation policy + - description: The team ID in: path - name: policy_id + name: team_id required: true schema: - example: a3000000-0000-0000-0000-000000000000 + example: 27590dae-47be-4a7d-9abf-8f4e45124020 type: string - - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `steps`, `steps.targets`." + - description: "Comma-separated list of included relationships to be returned. Allowed values: `rules`, `rules.policy`." 
in: query name: include schema: @@ -98541,42 +100199,34 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/EscalationPolicy" + $ref: "#/components/schemas/TeamRoutingRules" description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" - "401": - $ref: "#/components/responses/UnauthorizedResponse" - "403": - $ref: "#/components/responses/ForbiddenResponse" - "404": - $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Get On-Call escalation policy + summary: Get On-Call team routing rules tags: - On-Call - x-menu-order: 7 + x-menu-order: 9 "x-permission": operator: AND permissions: - on_call_read put: - description: Update an On-Call escalation policy - operationId: UpdateOnCallEscalationPolicy + description: Set a team's On-Call routing rules + operationId: SetOnCallTeamRoutingRules parameters: - - description: The ID of the escalation policy + - description: The team ID in: path - name: policy_id + name: team_id required: true schema: - example: a3000000-0000-0000-0000-000000000000 + example: 27590dae-47be-4a7d-9abf-8f4e45124020 type: string - - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `steps`, `steps.targets`." + - description: "Comma-separated list of included relationships to be returned. Allowed values: `rules`, `rules.policy`." 
in: query name: include schema: @@ -98585,14 +100235,47 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/EscalationPolicyUpdateRequest" + $ref: "#/components/schemas/TeamRoutingRulesRequest" required: true responses: "200": content: application/json: schema: - $ref: "#/components/schemas/EscalationPolicy" + $ref: "#/components/schemas/TeamRoutingRules" + description: OK + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: [] + summary: Set On-Call team routing rules + tags: + - On-Call + x-menu-order: 10 + "x-permission": + operator: AND + permissions: + - on_call_write + /api/v2/on-call/users/{user_id}/notification-channels: + get: + description: List the notification channels for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: ListUserNotificationChannels + parameters: + - description: The user ID + in: path + name: user_id + required: true + schema: + example: 00000000-0000-0000-0000-000000000000 + type: string + responses: + "200": + content: + "application/json": + schema: + $ref: "#/components/schemas/ListNotificationChannelsResponse" description: OK "400": $ref: "#/components/responses/BadRequestResponse" @@ -98608,261 +100291,220 @@ paths: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Update On-Call escalation policy + summary: List On-Call notification channels for a user tags: - On-Call - x-menu-order: 6 + x-menu-order: 15 "x-permission": operator: AND permissions: - - on_call_write - /api/v2/on-call/pages: + - on_call_read post: - description: |- - Trigger a new On-Call Page. - operationId: CreateOnCallPage + description: Create a new notification channel for a user. 
The authenticated user must be the target user or have the `on_call_admin` permission + operationId: CreateUserNotificationChannel + parameters: + - description: The user ID + in: path + name: user_id + required: true + schema: + example: 00000000-0000-0000-0000-000000000000 + type: string requestBody: content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/CreatePageRequest" + $ref: "#/components/schemas/CreateUserNotificationChannelRequest" required: true responses: - "200": + "201": content: - application/json: + "application/json": schema: - $ref: "#/components/schemas/CreatePageResponse" - description: OK. + $ref: "#/components/schemas/NotificationChannel" + description: Created + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - servers: - - url: https://{site} - variables: - site: - default: navy.oncall.datadoghq.com - description: The globally available endpoint for On-Call. - enum: - - lava.oncall.datadoghq.com - - saffron.oncall.datadoghq.com - - navy.oncall.datadoghq.com - - coral.oncall.datadoghq.com - - teal.oncall.datadoghq.com - - beige.oncall.datadoghq.eu - - url: "{protocol}://{name}" - variables: - name: - default: api.datadoghq.com - description: Full site DNS name. - protocol: - default: https - description: The protocol for accessing the API. - - url: https://{subdomain}.{site} - variables: - site: - default: datadoghq.com - description: Any Datadog deployment. - subdomain: - default: api - description: The subdomain where the API is deployed. 
- summary: Create On-Call Page + summary: Create an On-Call notification channel for a user tags: - - On-Call Paging - x-menu-order: 1 - /api/v2/on-call/pages/{page_id}/acknowledge: - post: - description: |- - Acknowledges an On-Call Page. - operationId: AcknowledgeOnCallPage + - On-Call + x-menu-order: 16 + "x-permission": + operator: AND + permissions: + - on_call_respond + /api/v2/on-call/users/{user_id}/notification-channels/{channel_id}: + delete: + description: Delete a notification channel for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: DeleteUserNotificationChannel parameters: - - description: The page ID. + - description: The user ID in: path - name: page_id + name: user_id required: true schema: - example: 15e74b8b-f865-48d0-bcc5-453323ed2c8f - format: uuid + example: 00000000-0000-0000-0000-000000000000 + type: string + - description: The channel ID + in: path + name: channel_id + required: true + schema: + example: 00000000-0000-0000-0000-000000000000 type: string responses: - "202": - description: Accepted. + "204": + description: No Content + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - servers: - - url: https://{site} - variables: - site: - default: navy.oncall.datadoghq.com - description: The globally available endpoint for On-Call. - enum: - - lava.oncall.datadoghq.com - - saffron.oncall.datadoghq.com - - navy.oncall.datadoghq.com - - coral.oncall.datadoghq.com - - teal.oncall.datadoghq.com - - beige.oncall.datadoghq.eu - - url: "{protocol}://{name}" - variables: - name: - default: api.datadoghq.com - description: Full site DNS name. 
- protocol: - default: https - description: The protocol for accessing the API. - - url: https://{subdomain}.{site} - variables: - site: - default: datadoghq.com - description: Any Datadog deployment. - subdomain: - default: api - description: The subdomain where the API is deployed. - summary: Acknowledge On-Call Page + summary: Delete an On-Call notification channel for a user tags: - - On-Call Paging - x-menu-order: 2 - /api/v2/on-call/pages/{page_id}/escalate: - post: - description: |- - Escalates an On-Call Page. - operationId: EscalateOnCallPage + - On-Call + x-menu-order: 13 + "x-permission": + operator: AND + permissions: + - on_call_respond + get: + description: Get a notification channel for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: GetUserNotificationChannel parameters: - - description: The page ID. + - description: The user ID in: path - name: page_id + name: user_id required: true schema: - example: 15e74b8b-f865-48d0-bcc5-453323ed2c8f - format: uuid + example: 00000000-0000-0000-0000-000000000000 + type: string + - description: The channel ID + in: path + name: channel_id + required: true + schema: + example: 00000000-0000-0000-0000-000000000000 type: string responses: - "202": - description: Accepted. + "200": + content: + "application/json": + schema: + $ref: "#/components/schemas/NotificationChannel" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - servers: - - url: https://{site} - variables: - site: - default: navy.oncall.datadoghq.com - description: The globally available endpoint for On-Call. 
- enum: - - lava.oncall.datadoghq.com - - saffron.oncall.datadoghq.com - - navy.oncall.datadoghq.com - - coral.oncall.datadoghq.com - - teal.oncall.datadoghq.com - - beige.oncall.datadoghq.eu - - url: "{protocol}://{name}" - variables: - name: - default: api.datadoghq.com - description: Full site DNS name. - protocol: - default: https - description: The protocol for accessing the API. - - url: https://{subdomain}.{site} - variables: - site: - default: datadoghq.com - description: Any Datadog deployment. - subdomain: - default: api - description: The subdomain where the API is deployed. - summary: Escalate On-Call Page + summary: Get an On-Call notification channel for a user tags: - - On-Call Paging - x-menu-order: 3 - /api/v2/on-call/pages/{page_id}/resolve: - post: - description: |- - Resolves an On-Call Page. - operationId: ResolveOnCallPage + - On-Call + x-menu-order: 14 + "x-permission": + operator: AND + permissions: + - on_call_read + /api/v2/on-call/users/{user_id}/notification-rules: + get: + description: List the notification rules for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: ListUserNotificationRules parameters: - - description: The page ID. + - description: "Comma-separated list of included relationships to be returned. Allowed values: `channel`." + in: query + name: include + schema: + type: string + - description: The user ID in: path - name: page_id + name: user_id required: true schema: - example: 15e74b8b-f865-48d0-bcc5-453323ed2c8f - format: uuid + example: 00000000-0000-0000-0000-000000000000 type: string responses: - "202": - description: Accepted. 
+ "200": + content: + "application/json": + schema: + $ref: "#/components/schemas/ListOnCallNotificationRulesResponse" + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - servers: - - url: https://{site} - variables: - site: - default: navy.oncall.datadoghq.com - description: The globally available endpoint for On-Call. - enum: - - lava.oncall.datadoghq.com - - saffron.oncall.datadoghq.com - - navy.oncall.datadoghq.com - - coral.oncall.datadoghq.com - - teal.oncall.datadoghq.com - - beige.oncall.datadoghq.eu - - url: "{protocol}://{name}" - variables: - name: - default: api.datadoghq.com - description: Full site DNS name. - protocol: - default: https - description: The protocol for accessing the API. - - url: https://{subdomain}.{site} - variables: - site: - default: datadoghq.com - description: Any Datadog deployment. - subdomain: - default: api - description: The subdomain where the API is deployed. - summary: Resolve On-Call Page + summary: List On-Call notification rules for a user tags: - - On-Call Paging - x-menu-order: 4 - /api/v2/on-call/schedules: + - On-Call + x-menu-order: 18 + "x-permission": + operator: AND + permissions: + - on_call_read post: - description: Create a new On-Call schedule - operationId: CreateOnCallSchedule + description: Create a new notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: CreateUserNotificationRule parameters: - - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `layers`, `layers.members`, `layers.members.user`." 
- in: query - name: include + - description: The user ID + in: path + name: user_id + required: true schema: + example: 00000000-0000-0000-0000-000000000000 type: string requestBody: content: "application/json": schema: - $ref: "#/components/schemas/ScheduleCreateRequest" + $ref: "#/components/schemas/CreateOnCallNotificationRuleRequest" required: true responses: "201": content: "application/json": schema: - $ref: "#/components/schemas/Schedule" + $ref: "#/components/schemas/OnCallNotificationRule" description: Created "400": $ref: "#/components/responses/BadRequestResponse" @@ -98870,35 +100512,46 @@ paths: $ref: "#/components/responses/UnauthorizedResponse" "403": $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Create On-Call schedule + summary: Create an On-Call notification rule for a user tags: - On-Call - x-menu-order: 1 + x-menu-order: 17 "x-permission": operator: AND permissions: - - on_call_write - /api/v2/on-call/schedules/{schedule_id}: + - on_call_respond + /api/v2/on-call/users/{user_id}/notification-rules/{rule_id}: delete: - description: Delete an On-Call schedule - operationId: DeleteOnCallSchedule + description: Delete a notification rule for a user. 
The authenticated user must be the target user or have the `on_call_admin` permission + operationId: DeleteUserNotificationRule parameters: - - description: The ID of the schedule + - description: The user ID in: path - name: schedule_id + name: user_id required: true schema: - example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d + example: 00000000-0000-0000-0000-000000000000 + type: string + - description: The rule ID + in: path + name: rule_id + required: true + schema: + example: 00000000-0000-0000-0000-000000000000 type: string responses: "204": description: No Content + "400": + $ref: "#/components/responses/BadRequestResponse" "401": $ref: "#/components/responses/UnauthorizedResponse" "403": @@ -98911,37 +100564,46 @@ paths: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Delete On-Call schedule + summary: Delete an On-Call notification rule for a user tags: - On-Call - x-menu-order: 3 + x-menu-order: 19 "x-permission": - operator: AND + operator: OR permissions: - - on_call_write + - on_call_respond get: - description: Get an On-Call schedule - operationId: GetOnCallSchedule + description: Get a notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: GetUserNotificationRule parameters: - - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `layers`, `layers.members`, `layers.members.user`." - in: query - name: include + - description: The user ID + in: path + name: user_id + required: true schema: + example: 00000000-0000-0000-0000-000000000000 type: string - - description: The ID of the schedule + - description: The rule ID in: path - name: schedule_id + name: rule_id required: true schema: - example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d + example: 00000000-0000-0000-0000-000000000000 + type: string + - description: "Comma-separated list of included relationships to be returned. Allowed values: `channel`." 
+ in: query + name: include + schema: type: string responses: "200": content: "application/json": schema: - $ref: "#/components/schemas/Schedule" + $ref: "#/components/schemas/OnCallNotificationRule" description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" "401": $ref: "#/components/responses/UnauthorizedResponse" "403": @@ -98954,42 +100616,49 @@ paths: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Get On-Call schedule + summary: Get an On-Call notification rule for a user tags: - On-Call - x-menu-order: 2 + x-menu-order: 20 "x-permission": - operator: AND + operator: OR permissions: - on_call_read put: - description: Update a new On-Call schedule - operationId: UpdateOnCallSchedule + description: Update a notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission + operationId: UpdateUserNotificationRule parameters: - - description: "Comma-separated list of included relationships to be returned. Allowed values: `teams`, `layers`, `layers.members`, `layers.members.user`." - in: query - name: include + - description: The user ID + in: path + name: user_id + required: true schema: + example: 00000000-0000-0000-0000-000000000000 type: string - - description: The ID of the schedule + - description: The rule ID in: path - name: schedule_id + name: rule_id required: true schema: - example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d + example: 00000000-0000-0000-0000-000000000000 + type: string + - description: "Comma-separated list of included relationships to be returned. Allowed values: `channel`." 
+ in: query + name: include + schema: type: string requestBody: content: "application/json": schema: - $ref: "#/components/schemas/ScheduleUpdateRequest" + $ref: "#/components/schemas/UpdateOnCallNotificationRuleRequest" required: true responses: "200": content: "application/json": schema: - $ref: "#/components/schemas/Schedule" + $ref: "#/components/schemas/OnCallNotificationRule" description: OK "400": $ref: "#/components/responses/BadRequestResponse" @@ -99005,43 +100674,158 @@ paths: - apiKeyAuth: [] appKeyAuth: [] - AuthZ: [] - summary: Update On-Call schedule + summary: Update an On-Call notification rule for a user tags: - On-Call - x-menu-order: 4 + x-menu-order: 21 "x-permission": - operator: AND + operator: OR permissions: - - on_call_write - /api/v2/on-call/schedules/{schedule_id}/on-call: + - on_call_read + /api/v2/org_configs: get: - description: "Retrieves the user who is on-call for the specified schedule at a given time." - operationId: GetScheduleOnCallUser + description: Returns all Org Configs (name, description, and value). + operationId: ListOrgConfigs + responses: + "200": + content: + application/json: + schema: {$ref: "#/components/schemas/OrgConfigListResponse"} + description: OK + "400": {$ref: "#/components/responses/BadRequestResponse"} + "401": {$ref: "#/components/responses/UnauthorizedResponse"} + "403": {$ref: "#/components/responses/ForbiddenResponse"} + "429": {$ref: "#/components/responses/TooManyRequestsResponse"} + summary: List Org Configs + tags: [Organizations] + x-menu-order: 1 + "x-permission": + operator: OPEN + permissions: [] + /api/v2/org_configs/{org_config_name}: + get: + description: Return the name, description, and value of a specific Org Config. 
+ operationId: GetOrgConfig + parameters: [$ref: "#/components/parameters/OrgConfigName"] + responses: + "200": + content: + application/json: + schema: {$ref: "#/components/schemas/OrgConfigGetResponse"} + description: OK + "400": {$ref: "#/components/responses/BadRequestResponse"} + "401": {$ref: "#/components/responses/UnauthorizedResponse"} + "403": {$ref: "#/components/responses/ForbiddenResponse"} + "404": {$ref: "#/components/responses/NotFoundResponse"} + "429": {$ref: "#/components/responses/TooManyRequestsResponse"} + summary: Get a specific Org Config value + tags: [Organizations] + x-menu-order: 2 + "x-permission": + operator: OPEN + permissions: [] + patch: + description: Update the value of a specific Org Config. + operationId: UpdateOrgConfig + parameters: [$ref: "#/components/parameters/OrgConfigName"] + requestBody: + content: + application/json: + schema: {$ref: "#/components/schemas/OrgConfigWriteRequest"} + required: true + responses: + "200": + content: + application/json: + schema: {$ref: "#/components/schemas/OrgConfigGetResponse"} + description: OK + "400": {$ref: "#/components/responses/BadRequestResponse"} + "401": {$ref: "#/components/responses/UnauthorizedResponse"} + "403": {$ref: "#/components/responses/ForbiddenResponse"} + "404": {$ref: "#/components/responses/NotFoundResponse"} + "429": {$ref: "#/components/responses/TooManyRequestsResponse"} + summary: Update a specific Org Config + tags: [Organizations] + x-menu-order: 3 + "x-permission": + operator: OR + permissions: + - org_management + /api/v2/org_connections: + get: + description: Returns a list of org connections. + operationId: ListOrgConnections parameters: - - description: "Specifies related resources to include in the response as a comma-separated list. Allowed value: `user`." + - description: The Org ID of the sink org. 
+ example: "0879ce27-29a1-481f-a12e-bc2a48ec9ae1" in: query - name: include + name: sink_org_id + required: false schema: type: string - - description: The ID of the schedule. - in: path - name: schedule_id - required: true + - description: The Org ID of the source org. + example: "0879ce27-29a1-481f-a12e-bc2a48ec9ae1" + in: query + name: source_org_id + required: false schema: - example: 3653d3c6-0c75-11ea-ad28-fb5701eabc7d type: string - - description: Retrieves the on-call user at the given timestamp in RFC3339 format (for example, `2025-05-07T02:53:01Z` or `2025-05-07T02:53:01+00:00`). When using timezone offsets with `+` or `-`, ensure proper URL encoding (`+` should be encoded as `%2B`). Defaults to the current time if omitted. + - description: The limit of number of entries you want to return. Default is 1000. + example: 1000 in: query - name: filter[at_ts] + name: limit + required: false schema: - example: "2025-05-07T02:53:01Z" - type: string + format: int64 + type: integer + - description: The pagination offset which you want to query from. Default is 0. + example: 0 + in: query + name: offset + required: false + schema: + format: int64 + type: integer responses: "200": content: application/json: schema: - $ref: "#/components/schemas/Shift" + $ref: "#/components/schemas/OrgConnectionListResponse" + description: OK + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: + - org_connections_read + summary: List Org Connections + tags: ["Org Connections"] + x-menu-order: 100 + "x-permission": + operator: OR + permissions: + - org_connections_read + post: + description: Create a new org connection between the current org and a target org. 
+ operationId: CreateOrgConnections + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/OrgConnectionCreateRequest" + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/OrgConnectionResponse" description: OK "400": $ref: "#/components/responses/BadRequestResponse" @@ -99051,43 +100835,71 @@ paths: $ref: "#/components/responses/ForbiddenResponse" "404": $ref: "#/components/responses/NotFoundResponse" + "409": + $ref: "#/components/responses/ConflictResponse" "429": $ref: "#/components/responses/TooManyRequestsResponse" security: - apiKeyAuth: [] appKeyAuth: [] - - AuthZ: [] - summary: Get scheduled on-call user - tags: - - On-Call - x-menu-order: 11 + - AuthZ: + - org_connections_write + summary: Create Org Connection + tags: ["Org Connections"] + x-codegen-request-body-name: body + x-menu-order: 101 "x-permission": - operator: AND + operator: OR permissions: - - on_call_read - /api/v2/on-call/teams/{team_id}/on-call: - get: - description: Get a team's on-call users at a given time - operationId: GetTeamOnCallUsers + - org_connections_write + /api/v2/org_connections/{connection_id}: + delete: + description: Delete an existing org connection. + operationId: DeleteOrgConnections parameters: - - description: "Comma-separated list of included relationships to be returned. Allowed values: `responders`, `escalations`, `escalations.responders`." 
- in: query - name: include - schema: - type: string - - description: The team ID - in: path - name: team_id - required: true - schema: - example: 27590dae-47be-4a7d-9abf-8f4e45124020 - type: string + - $ref: "#/components/parameters/OrgConnectionId" + responses: + "200": + description: OK + "400": + $ref: "#/components/responses/BadRequestResponse" + "401": + $ref: "#/components/responses/UnauthorizedResponse" + "403": + $ref: "#/components/responses/ForbiddenResponse" + "404": + $ref: "#/components/responses/NotFoundResponse" + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + security: + - apiKeyAuth: [] + appKeyAuth: [] + - AuthZ: + - org_connections_write + summary: Delete Org Connection + tags: ["Org Connections"] + x-menu-order: 103 + "x-permission": + operator: OR + permissions: + - org_connections_write + patch: + description: Update an existing org connection. + operationId: UpdateOrgConnections + parameters: + - $ref: "#/components/parameters/OrgConnectionId" + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/OrgConnectionUpdateRequest" + required: true responses: "200": content: application/json: schema: - $ref: "#/components/schemas/TeamOnCallResponders" + $ref: "#/components/schemas/OrgConnectionResponse" description: OK "400": $ref: "#/components/responses/BadRequestResponse" @@ -99102,761 +100914,1241 @@ paths: security: - apiKeyAuth: [] appKeyAuth: [] - - AuthZ: [] - summary: Get team on-call users - tags: - - On-Call - x-menu-order: 12 + - AuthZ: + - org_connections_write + summary: Update Org Connection + tags: ["Org Connections"] + x-menu-order: 102 "x-permission": - operator: AND + operator: OR permissions: - - on_call_read - /api/v2/on-call/teams/{team_id}/routing-rules: + - org_connections_write + /api/v2/org_group_memberships: get: - description: Get a team's On-Call routing rules - operationId: GetOnCallTeamRoutingRules + description: >- + List organization group memberships. 
Filter by org group ID or org UUID. At least one of `filter[org_group_id]` or `filter[org_uuid]` must be provided. When filtering by org UUID, returns a single-item list with the membership for that org. + operationId: ListOrgGroupMemberships parameters: - - description: The team ID - in: path - name: team_id - required: true - schema: - example: 27590dae-47be-4a7d-9abf-8f4e45124020 - type: string - - description: "Comma-separated list of included relationships to be returned. Allowed values: `rules`, `rules.policy`." - in: query - name: include - schema: - type: string + - $ref: "#/components/parameters/OrgGroupMembershipFilterOrgGroupId" + - $ref: "#/components/parameters/OrgGroupMembershipFilterOrgUuid" + - $ref: "#/components/parameters/OrgGroupPageNumber" + - $ref: "#/components/parameters/OrgGroupPageSize" + - $ref: "#/components/parameters/MembershipSort" + responses: + "200": + content: + application/json: + examples: + default: + value: + data: + - attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + org_name: "Acme Corp" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_memberships + meta: + page: + total_count: 1 + schema: + $ref: "#/components/schemas/OrgGroupMembershipListResponse" + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: List org group memberships + tags: [Org Groups] + x-menu-order: 6 + 
"x-permission": + operator: OR + permissions: + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_memberships/bulk: + patch: + description: >- + Move a batch of organizations from one org group to another. This is an atomic operation. Maximum 100 orgs per request. + operationId: BulkUpdateOrgGroupMemberships + requestBody: + content: + application/json: + examples: + default: + value: + data: + attributes: + orgs: + - org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + relationships: + source_org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + target_org_group: + data: + id: "d4e5f6a7-b890-1234-cdef-567890abcdef" + type: org_groups + type: org_group_membership_bulk_updates + schema: + $ref: "#/components/schemas/OrgGroupMembershipBulkUpdateRequest" + required: true responses: "200": + content: + application/json: + examples: + default: + value: + data: + - attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-16T14:00:00Z" + org_name: "Acme Corp" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + relationships: + org_group: + data: + id: "d4e5f6a7-b890-1234-cdef-567890abcdef" + type: org_groups + type: org_group_memberships + schema: + $ref: "#/components/schemas/OrgGroupMembershipListResponse" + description: OK + "400": content: application/json: schema: - $ref: "#/components/schemas/TeamRoutingRules" + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "404": + content: + 
application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: Bulk update org group memberships + tags: [Org Groups] + x-menu-order: 9 + "x-permission": + operator: OR + permissions: + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_memberships/{org_group_membership_id}: + get: + description: Get a specific organization group membership by its ID. + operationId: GetOrgGroupMembership + parameters: + - $ref: "#/components/parameters/OrgGroupMembershipId" + responses: + "200": + content: + application/json: + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + org_name: "Acme Corp" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_memberships + schema: + $ref: "#/components/schemas/OrgGroupMembershipResponse" description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Get On-Call team routing rules - tags: - - On-Call - 
x-menu-order: 9 + summary: Get an org group membership + tags: [Org Groups] + x-menu-order: 7 "x-permission": - operator: AND + operator: OR permissions: - - on_call_read - put: - description: Set a team's On-Call routing rules - operationId: SetOnCallTeamRoutingRules + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + patch: + description: Move an organization to a different org group by updating its membership. + operationId: UpdateOrgGroupMembership parameters: - - description: The team ID - in: path - name: team_id - required: true - schema: - example: 27590dae-47be-4a7d-9abf-8f4e45124020 - type: string - - description: "Comma-separated list of included relationships to be returned. Allowed values: `rules`, `rules.policy`." - in: query - name: include - schema: - type: string + - $ref: "#/components/parameters/OrgGroupMembershipId" requestBody: content: application/json: + examples: + default: + value: + data: + id: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_memberships schema: - $ref: "#/components/schemas/TeamRoutingRulesRequest" + $ref: "#/components/schemas/OrgGroupMembershipUpdateRequest" required: true responses: "200": content: application/json: + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-16T14:00:00Z" + org_name: "Acme Corp" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "f1e2d3c4-b5a6-7890-1234-567890abcdef" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_memberships schema: - $ref: "#/components/schemas/TeamRoutingRules" + $ref: "#/components/schemas/OrgGroupMembershipResponse" description: OK + "400": + content: + 
application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Set On-Call team routing rules - tags: - - On-Call - x-menu-order: 10 + summary: Update an org group membership + tags: [Org Groups] + x-menu-order: 8 "x-permission": - operator: AND + operator: OR permissions: - - on_call_write - /api/v2/on-call/users/{user_id}/notification-channels: + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_policies: get: - description: List the notification channels for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: ListUserNotificationChannels + description: List policies for an organization group. Requires a filter on org group ID. 
+ operationId: ListOrgGroupPolicies parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string + - $ref: "#/components/parameters/OrgGroupPolicyFilterOrgGroupId" + - $ref: "#/components/parameters/OrgGroupPolicyFilterPolicyName" + - $ref: "#/components/parameters/OrgGroupPageNumber" + - $ref: "#/components/parameters/OrgGroupPageSize" + - $ref: "#/components/parameters/PolicySort" responses: "200": content: - "application/json": + application/json: + examples: + default: + value: + data: + - attributes: + content: + value: "UTC" + enforced_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + policy_name: "monitor_timezone" + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_policies + meta: + page: + total_count: 1 schema: - $ref: "#/components/schemas/ListNotificationChannelsResponse" + $ref: "#/components/schemas/OrgGroupPolicyListResponse" description: OK "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" - "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: List On-Call notification channels for a user - tags: - - On-Call - x-menu-order: 15 + summary: List org group policies + tags: [Org Groups] + x-menu-order: 10 
"x-permission": - operator: AND + operator: OR permissions: - - on_call_read + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). post: - description: Create a new notification channel for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: CreateUserNotificationChannel - parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string + description: Create a new policy for an organization group. + operationId: CreateOrgGroupPolicy requestBody: content: - "application/json": + application/json: + examples: + default: + value: + data: + attributes: + content: + value: "UTC" + policy_name: "monitor_timezone" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_policies schema: - $ref: "#/components/schemas/CreateUserNotificationChannelRequest" + $ref: "#/components/schemas/OrgGroupPolicyCreateRequest" required: true responses: "201": content: - "application/json": + application/json: + examples: + default: + value: + data: + attributes: + content: + value: "UTC" + enforced_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + policy_name: "monitor_timezone" + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_policies schema: - $ref: "#/components/schemas/NotificationChannel" + $ref: "#/components/schemas/OrgGroupPolicyResponse" description: Created "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: 
+ application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" - "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Conflict "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Create an On-Call notification channel for a user - tags: - - On-Call - x-menu-order: 16 + summary: Create an org group policy + tags: [Org Groups] + x-menu-order: 11 "x-permission": - operator: AND + operator: OR permissions: - - on_call_respond - /api/v2/on-call/users/{user_id}/notification-channels/{channel_id}: + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_policies/{org_group_policy_id}: delete: - description: Delete a notification channel for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: DeleteUserNotificationChannel + description: Delete an organization group policy by its ID. 
+ operationId: DeleteOrgGroupPolicy parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: The channel ID - in: path - name: channel_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string + - $ref: "#/components/parameters/OrgGroupPolicyId" responses: "204": description: No Content "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Delete an On-Call notification channel for a user - tags: - - On-Call + summary: Delete an org group policy + tags: [Org Groups] x-menu-order: 13 "x-permission": - operator: AND + operator: OR permissions: - - on_call_respond - get: - description: Get a notification channel for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: GetUserNotificationChannel + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + patch: + description: Update the content of an existing organization group policy. 
+ operationId: UpdateOrgGroupPolicy parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: The channel ID - in: path - name: channel_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string + - $ref: "#/components/parameters/OrgGroupPolicyId" + requestBody: + content: + application/json: + examples: + default: + value: + data: + attributes: + content: + value: "US/Eastern" + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + type: org_group_policies + schema: + $ref: "#/components/schemas/OrgGroupPolicyUpdateRequest" + required: true responses: "200": content: - "application/json": + application/json: + examples: + default: + value: + data: + attributes: + content: + value: "US/Eastern" + enforced_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-16T14:00:00Z" + policy_name: "monitor_timezone" + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + type: org_group_policies schema: - $ref: "#/components/schemas/NotificationChannel" + $ref: "#/components/schemas/OrgGroupPolicyResponse" description: OK "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: 
"#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Get an On-Call notification channel for a user - tags: - - On-Call - x-menu-order: 14 + summary: Update an org group policy + tags: [Org Groups] + x-menu-order: 12 "x-permission": - operator: AND + operator: OR permissions: - - on_call_read - /api/v2/on-call/users/{user_id}/notification-rules: + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_policy_configs: get: - description: List the notification rules for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: ListUserNotificationRules - parameters: - - description: "Comma-separated list of included relationships to be returned. Allowed values: `channel`." - in: query - name: include - schema: - type: string - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string + description: List all org configs that are eligible to be used as organization group policies. + operationId: ListOrgGroupPolicyConfigs responses: "200": content: - "application/json": + application/json: + examples: + default: + value: + data: + - attributes: + allowed_values: ["UTC", "US/Eastern", "US/Pacific"] + default_value: "UTC" + description: "The default timezone for monitors." 
+ name: "monitor_timezone" + value_type: "string" + id: "monitor_timezone" + type: org_group_policy_configs schema: - $ref: "#/components/schemas/ListOnCallNotificationRulesResponse" + $ref: "#/components/schemas/OrgGroupPolicyConfigListResponse" description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" - "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: List On-Call notification rules for a user - tags: - - On-Call + summary: List org group policy configs + tags: [Org Groups] x-menu-order: 18 "x-permission": - operator: AND + operator: OR permissions: - - on_call_read - post: - description: Create a new notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: CreateUserNotificationRule + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_policy_overrides: + get: + description: >- + List policy overrides for an organization group. Requires a filter on org group ID. Optionally filter by policy ID. 
+ operationId: ListOrgGroupPolicyOverrides parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - requestBody: - content: - "application/json": - schema: - $ref: "#/components/schemas/CreateOnCallNotificationRuleRequest" - required: true + - $ref: "#/components/parameters/OrgGroupPolicyOverrideFilterOrgGroupId" + - $ref: "#/components/parameters/OrgGroupPolicyOverrideFilterPolicyId" + - $ref: "#/components/parameters/OrgGroupPageNumber" + - $ref: "#/components/parameters/OrgGroupPageSize" + - $ref: "#/components/parameters/OverrideSort" responses: - "201": + "200": content: - "application/json": + application/json: + examples: + default: + value: + data: + - attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + org_group_policy: + data: + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + type: org_group_policies + type: org_group_policy_overrides + meta: + page: + total_count: 1 schema: - $ref: "#/components/schemas/OnCallNotificationRule" - description: Created + $ref: "#/components/schemas/OrgGroupPolicyOverrideListResponse" + description: OK "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" - "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + 
description: Forbidden "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Create an On-Call notification rule for a user - tags: - - On-Call - x-menu-order: 17 + summary: List org group policy overrides + tags: [Org Groups] + x-menu-order: 14 "x-permission": - operator: AND + operator: OR permissions: - - on_call_respond - /api/v2/on-call/users/{user_id}/notification-rules/{rule_id}: - delete: - description: Delete a notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: DeleteUserNotificationRule - parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: The rule ID - in: path - name: rule_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + post: + description: Create a new policy override for an organization within an org group. 
+ operationId: CreateOrgGroupPolicyOverride + requestBody: + content: + application/json: + examples: + default: + value: + data: + attributes: + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + org_group_policy: + data: + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + type: org_group_policies + type: org_group_policy_overrides + schema: + $ref: "#/components/schemas/OrgGroupPolicyOverrideCreateRequest" + required: true responses: - "204": - description: No Content + "201": + content: + application/json: + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + org_group_policy: + data: + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + type: org_group_policies + type: org_group_policy_overrides + schema: + $ref: "#/components/schemas/OrgGroupPolicyOverrideResponse" + description: Created "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" - "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Conflict "429": $ref: 
"#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Delete an On-Call notification rule for a user - tags: - - On-Call - x-menu-order: 19 + summary: Create an org group policy override + tags: [Org Groups] + x-menu-order: 15 "x-permission": operator: OR permissions: - - on_call_respond - get: - description: Get a notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: GetUserNotificationRule + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_group_policy_overrides/{org_group_policy_override_id}: + delete: + description: Delete an organization group policy override by its ID. + operationId: DeleteOrgGroupPolicyOverride parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: The rule ID - in: path - name: rule_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: "Comma-separated list of included relationships to be returned. Allowed values: `channel`." 
- in: query - name: include - schema: - type: string + - $ref: "#/components/parameters/OrgGroupPolicyOverrideId" responses: - "200": + "204": + description: No Content + "400": content: - "application/json": + application/json: schema: - $ref: "#/components/schemas/OnCallNotificationRule" - description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Get an On-Call notification rule for a user - tags: - - On-Call - x-menu-order: 20 + summary: Delete an org group policy override + tags: [Org Groups] + x-menu-order: 17 "x-permission": operator: OR permissions: - - on_call_read - put: - description: Update a notification rule for a user. The authenticated user must be the target user or have the `on_call_admin` permission - operationId: UpdateUserNotificationRule + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + patch: + description: Update an existing organization group policy override. 
+ operationId: UpdateOrgGroupPolicyOverride parameters: - - description: The user ID - in: path - name: user_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: The rule ID - in: path - name: rule_id - required: true - schema: - example: 00000000-0000-0000-0000-000000000000 - type: string - - description: "Comma-separated list of included relationships to be returned. Allowed values: `channel`." - in: query - name: include - schema: - type: string + - $ref: "#/components/parameters/OrgGroupPolicyOverrideId" requestBody: content: - "application/json": + application/json: + examples: + default: + value: + data: + attributes: + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" + type: org_group_policy_overrides schema: - $ref: "#/components/schemas/UpdateOnCallNotificationRuleRequest" + $ref: "#/components/schemas/OrgGroupPolicyOverrideUpdateRequest" required: true responses: "200": content: - "application/json": + application/json: + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-16T14:00:00Z" + org_site: "datadoghq.com" + org_uuid: "c3d4e5f6-a7b8-9012-cdef-012345678901" + id: "9f8e7d6c-5b4a-3210-fedc-ba0987654321" + relationships: + org_group: + data: + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + org_group_policy: + data: + id: "1a2b3c4d-5e6f-7890-abcd-ef0123456789" + type: org_group_policies + type: org_group_policy_overrides schema: - $ref: "#/components/schemas/OnCallNotificationRule" + $ref: "#/components/schemas/OrgGroupPolicyOverrideResponse" description: OK "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: 
"#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: [] - summary: Update an On-Call notification rule for a user - tags: - - On-Call - x-menu-order: 21 + summary: Update an org group policy override + tags: [Org Groups] + x-menu-order: 16 "x-permission": operator: OR permissions: - - on_call_read - /api/v2/org_configs: + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_groups: get: - description: Returns all Org Configs (name, description, and value). - operationId: ListOrgConfigs + description: List all organization groups that the requesting organization has access to. 
+ operationId: ListOrgGroups + parameters: + - $ref: "#/components/parameters/OrgGroupPageNumber" + - $ref: "#/components/parameters/OrgGroupPageSize" + - $ref: "#/components/parameters/OrgGroupSort" + - $ref: "#/components/parameters/OrgGroupInclude" responses: "200": content: application/json: - schema: {$ref: "#/components/schemas/OrgConfigListResponse"} + examples: + default: + value: + data: + - attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + name: "My Org Group" + owner_org_site: "datadoghq.com" + owner_org_uuid: "b2c3d4e5-f6a7-8901-bcde-f01234567890" + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + meta: + page: + total_count: 1 + schema: + $ref: "#/components/schemas/OrgGroupListResponse" description: OK - "400": {$ref: "#/components/responses/BadRequestResponse"} - "401": {$ref: "#/components/responses/UnauthorizedResponse"} - "403": {$ref: "#/components/responses/ForbiddenResponse"} - "429": {$ref: "#/components/responses/TooManyRequestsResponse"} - summary: List Org Configs - tags: [Organizations] - x-menu-order: 1 - "x-permission": - operator: OPEN - permissions: [] - /api/v2/org_configs/{org_config_name}: - get: - description: Return the name, description, and value of a specific Org Config. 
- operationId: GetOrgConfig - parameters: [$ref: "#/components/parameters/OrgConfigName"] - responses: - "200": + "400": content: application/json: - schema: {$ref: "#/components/schemas/OrgConfigGetResponse"} - description: OK - "400": {$ref: "#/components/responses/BadRequestResponse"} - "401": {$ref: "#/components/responses/UnauthorizedResponse"} - "403": {$ref: "#/components/responses/ForbiddenResponse"} - "404": {$ref: "#/components/responses/NotFoundResponse"} - "429": {$ref: "#/components/responses/TooManyRequestsResponse"} - summary: Get a specific Org Config value - tags: [Organizations] - x-menu-order: 2 + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "429": + $ref: "#/components/responses/TooManyRequestsResponse" + summary: List org groups + tags: [Org Groups] + x-menu-order: 1 "x-permission": - operator: OPEN - permissions: [] - patch: - description: Update the value of a specific Org Config. - operationId: UpdateOrgConfig - parameters: [$ref: "#/components/parameters/OrgConfigName"] + operator: OR + permissions: + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + post: + description: Create a new organization group. 
+ operationId: CreateOrgGroup requestBody: content: application/json: - schema: {$ref: "#/components/schemas/OrgConfigWriteRequest"} + examples: + default: + value: + data: + attributes: + name: "My Org Group" + type: org_groups + schema: + $ref: "#/components/schemas/OrgGroupCreateRequest" required: true responses: - "200": + "201": content: application/json: - schema: {$ref: "#/components/schemas/OrgConfigGetResponse"} - description: OK - "400": {$ref: "#/components/responses/BadRequestResponse"} - "401": {$ref: "#/components/responses/UnauthorizedResponse"} - "403": {$ref: "#/components/responses/ForbiddenResponse"} - "404": {$ref: "#/components/responses/NotFoundResponse"} - "429": {$ref: "#/components/responses/TooManyRequestsResponse"} - summary: Update a specific Org Config - tags: [Organizations] - x-menu-order: 3 - "x-permission": - operator: OR - permissions: - - org_management - /api/v2/org_connections: - get: - description: Returns a list of org connections. - operationId: ListOrgConnections - parameters: - - description: The Org ID of the sink org. - example: "0879ce27-29a1-481f-a12e-bc2a48ec9ae1" - in: query - name: sink_org_id - required: false - schema: - type: string - - description: The Org ID of the source org. - example: "0879ce27-29a1-481f-a12e-bc2a48ec9ae1" - in: query - name: source_org_id - required: false - schema: - type: string - - description: The limit of number of entries you want to return. Default is 1000. - example: 1000 - in: query - name: limit - required: false - schema: - format: int64 - type: integer - - description: The pagination offset which you want to query from. Default is 0. 
- example: 0 - in: query - name: offset - required: false - schema: - format: int64 - type: integer - responses: - "200": + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + name: "My Org Group" + owner_org_site: "datadoghq.com" + owner_org_uuid: "b2c3d4e5-f6a7-8901-bcde-f01234567890" + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + schema: + $ref: "#/components/schemas/OrgGroupResponse" + description: Created + "400": content: application/json: schema: - $ref: "#/components/schemas/OrgConnectionListResponse" - description: OK + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Conflict "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: - - org_connections_read - summary: List Org Connections - tags: ["Org Connections"] - x-menu-order: 100 + summary: Create an org group + tags: [Org Groups] + x-menu-order: 3 "x-permission": operator: OR permissions: - - org_connections_read - post: - description: Create a new org connection between the current org and a target org. - operationId: CreateOrgConnections - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/OrgConnectionCreateRequest" - required: true + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. 
+ If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + /api/v2/org_groups/{org_group_id}: + delete: + description: Delete an organization group by its ID. + operationId: DeleteOrgGroup + parameters: + - $ref: "#/components/parameters/OrgGroupId" responses: - "200": + "204": + description: No Content + "400": content: application/json: schema: - $ref: "#/components/schemas/OrgConnectionResponse" - description: OK - "400": - $ref: "#/components/responses/BadRequestResponse" + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" - "409": - $ref: "#/components/responses/ConflictResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: - - org_connections_write - summary: Create Org Connection - tags: ["Org Connections"] - x-codegen-request-body-name: body - x-menu-order: 101 + summary: Delete an org group + tags: [Org Groups] + x-menu-order: 5 "x-permission": operator: OR permissions: - - org_connections_write - /api/v2/org_connections/{connection_id}: - delete: - description: Delete an existing org connection. - operationId: DeleteOrgConnections + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). + get: + description: Get a specific organization group by its ID. 
+ operationId: GetOrgGroup parameters: - - $ref: "#/components/parameters/OrgConnectionId" + - $ref: "#/components/parameters/OrgGroupId" responses: "200": + content: + application/json: + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-15T10:30:00Z" + name: "My Org Group" + owner_org_site: "datadoghq.com" + owner_org_uuid: "b2c3d4e5-f6a7-8901-bcde-f01234567890" + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups + schema: + $ref: "#/components/schemas/OrgGroupResponse" description: OK "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: - - org_connections_write - summary: Delete Org Connection - tags: ["Org Connections"] - x-menu-order: 103 + summary: Get an org group + tags: [Org Groups] + x-menu-order: 2 "x-permission": operator: OR permissions: - - org_connections_write + - org_group_read + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). patch: - description: Update an existing org connection. - operationId: UpdateOrgConnections + description: Update the name of an existing organization group. 
+ operationId: UpdateOrgGroup parameters: - - $ref: "#/components/parameters/OrgConnectionId" + - $ref: "#/components/parameters/OrgGroupId" requestBody: content: application/json: + examples: + default: + value: + data: + attributes: + name: "Updated Org Group Name" + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups schema: - $ref: "#/components/schemas/OrgConnectionUpdateRequest" + $ref: "#/components/schemas/OrgGroupUpdateRequest" required: true responses: "200": content: application/json: + examples: + default: + value: + data: + attributes: + created_at: "2024-01-15T10:30:00Z" + modified_at: "2024-01-16T14:00:00Z" + name: "Updated Org Group Name" + owner_org_site: "datadoghq.com" + owner_org_uuid: "b2c3d4e5-f6a7-8901-bcde-f01234567890" + id: "a1b2c3d4-e5f6-7890-abcd-ef0123456789" + type: org_groups schema: - $ref: "#/components/schemas/OrgConnectionResponse" + $ref: "#/components/schemas/OrgGroupResponse" description: OK "400": - $ref: "#/components/responses/BadRequestResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Bad Request "401": - $ref: "#/components/responses/UnauthorizedResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Unauthorized "403": - $ref: "#/components/responses/ForbiddenResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Forbidden "404": - $ref: "#/components/responses/NotFoundResponse" + content: + application/json: + schema: + $ref: "#/components/schemas/JSONAPIErrorResponse" + description: Not Found "429": $ref: "#/components/responses/TooManyRequestsResponse" - security: - - apiKeyAuth: [] - appKeyAuth: [] - - AuthZ: - - org_connections_write - summary: Update Org Connection - tags: ["Org Connections"] - x-menu-order: 102 + summary: Update an org group + tags: [Org Groups] + x-menu-order: 4 "x-permission": operator: OR 
permissions: - - org_connections_write + - org_group_write + x-unstable: |- + **Note**: This endpoint is in preview and is subject to change. + If you have any feedback, contact [Datadog support](https://docs.datadoghq.com/help/). /api/v2/permissions: get: description: |- @@ -118278,6 +120570,9 @@ tags: - description: |- Manage connections between organizations. Org connections allow for controlled sharing of data between different Datadog organizations. See the [Cross-Organization Visibiltiy](https://docs.datadoghq.com/account_management/org_settings/cross_org_visibility/) page for more information. name: Org Connections + - description: >- + Manage organization groups, memberships, policies, policy overrides, and policy configurations. + name: Org Groups - description: Create, edit, and manage your organizations. Read more about [multi-org accounts](https://docs.datadoghq.com/account_management/multi_organization). externalDocs: description: Find out more at diff --git a/data/api/v2/translate_actions.json b/data/api/v2/translate_actions.json index 58069673954..9b4e138fb10 100644 --- a/data/api/v2/translate_actions.json +++ b/data/api/v2/translate_actions.json @@ -2779,6 +2779,94 @@ "request_description": "", "request_schema_description": "Request to update an org connection." }, + "ListOrgGroupMemberships": { + "description": "List organization group memberships. Filter by org group ID or org UUID. At least one of `filter[org_group_id]` or `filter[org_uuid]` must be provided. When filtering by org UUID, returns a single-item list with the membership for that org.", + "summary": "List org group memberships" + }, + "BulkUpdateOrgGroupMemberships": { + "description": "Move a batch of organizations from one org group to another. This is an atomic operation. Maximum 100 orgs per request.", + "summary": "Bulk update org group memberships", + "request_description": "", + "request_schema_description": "Request to bulk update org group memberships." 
+ }, + "GetOrgGroupMembership": { + "description": "Get a specific organization group membership by its ID.", + "summary": "Get an org group membership" + }, + "UpdateOrgGroupMembership": { + "description": "Move an organization to a different org group by updating its membership.", + "summary": "Update an org group membership", + "request_description": "", + "request_schema_description": "Request to update an org group membership." + }, + "ListOrgGroupPolicies": { + "description": "List policies for an organization group. Requires a filter on org group ID.", + "summary": "List org group policies" + }, + "CreateOrgGroupPolicy": { + "description": "Create a new policy for an organization group.", + "summary": "Create an org group policy", + "request_description": "", + "request_schema_description": "Request to create an org group policy." + }, + "DeleteOrgGroupPolicy": { + "description": "Delete an organization group policy by its ID.", + "summary": "Delete an org group policy" + }, + "UpdateOrgGroupPolicy": { + "description": "Update the content of an existing organization group policy.", + "summary": "Update an org group policy", + "request_description": "", + "request_schema_description": "Request to update an org group policy." + }, + "ListOrgGroupPolicyConfigs": { + "description": "List all org configs that are eligible to be used as organization group policies.", + "summary": "List org group policy configs" + }, + "ListOrgGroupPolicyOverrides": { + "description": "List policy overrides for an organization group. Requires a filter on org group ID. Optionally filter by policy ID.", + "summary": "List org group policy overrides" + }, + "CreateOrgGroupPolicyOverride": { + "description": "Create a new policy override for an organization within an org group.", + "summary": "Create an org group policy override", + "request_description": "", + "request_schema_description": "Request to create an org group policy override." 
+ }, + "DeleteOrgGroupPolicyOverride": { + "description": "Delete an organization group policy override by its ID.", + "summary": "Delete an org group policy override" + }, + "UpdateOrgGroupPolicyOverride": { + "description": "Update an existing organization group policy override.", + "summary": "Update an org group policy override", + "request_description": "", + "request_schema_description": "Request to update an org group policy override." + }, + "ListOrgGroups": { + "description": "List all organization groups that the requesting organization has access to.", + "summary": "List org groups" + }, + "CreateOrgGroup": { + "description": "Create a new organization group.", + "summary": "Create an org group", + "request_description": "", + "request_schema_description": "Request to create an org group." + }, + "DeleteOrgGroup": { + "description": "Delete an organization group by its ID.", + "summary": "Delete an org group" + }, + "GetOrgGroup": { + "description": "Get a specific organization group by its ID.", + "summary": "Get an org group" + }, + "UpdateOrgGroup": { + "description": "Update the name of an existing organization group.", + "summary": "Update an org group", + "request_description": "", + "request_schema_description": "Request to update an org group." + }, "ListPermissions": { "description": "Returns a list of all permissions, including name, description, and ID.", "summary": "List permissions" diff --git a/data/api/v2/translate_tags.json b/data/api/v2/translate_tags.json index c01ae27a0a6..8fd1cb66b07 100644 --- a/data/api/v2/translate_tags.json +++ b/data/api/v2/translate_tags.json @@ -271,6 +271,10 @@ "name": "Org Connections", "description": "Manage connections between organizations. Org connections allow for controlled sharing of data between different Datadog organizations. See the [Cross-Organization Visibiltiy](https://docs.datadoghq.com/account_management/org_settings/cross_org_visibility/) page for more information." 
}, + "org-groups": { + "name": "Org Groups", + "description": "Manage organization groups, memberships, policies, policy overrides, and policy configurations." + }, "organizations": { "name": "Organizations", "description": "Create, edit, and manage your organizations. Read more about [multi-org accounts](https://docs.datadoghq.com/account_management/multi_organization)."