Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
100 changes: 41 additions & 59 deletions logstash-core/lib/logstash/java_pipeline.rb
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ module LogStash; class JavaPipeline < AbstractPipeline
attr_reader \
:worker_threads,
:input_threads,
:events_consumed,
:events_filtered,
:started_at,
:thread

Expand All @@ -61,27 +59,7 @@ def finish_initialization

@drain_queue = settings.get_value("queue.drain") || settings.get("queue.type") == MEMORY

@events_filtered = java.util.concurrent.atomic.LongAdder.new
@events_consumed = java.util.concurrent.atomic.LongAdder.new

@input_threads = []
# @ready requires thread safety since it is typically polled from outside the pipeline thread
@ready = Concurrent::AtomicBoolean.new(false)
@running = Concurrent::AtomicBoolean.new(false)
@flushing = java.util.concurrent.atomic.AtomicBoolean.new(false)
@flushRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
@shutdownRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
@crash_detected = Concurrent::AtomicBoolean.new(false)
@outputs_registered = Concurrent::AtomicBoolean.new(false)

# @finished_execution signals that the pipeline thread has finished its execution
# regardless of any exceptions; it will always be true when the thread completes
@finished_execution = Concurrent::AtomicBoolean.new(false)

# @finished_run signals that the run methods called in the pipeline thread was completed
# without errors and it will NOT be set if the run method exits from an exception; this
# is by design and necessary for the wait_until_started semantic
@finished_run = Concurrent::AtomicBoolean.new(false)

@logger.info(I18n.t('logstash.pipeline.effective_ecs_compatibility',
:pipeline_id => pipeline_id,
Expand All @@ -91,15 +69,23 @@ def finish_initialization
end # def initialize

def finished_execution?
@finished_execution.true?
control_state.isFinishedExecution()
end

def finished_run?
@finished_run.true?
control_state.isFinishedRun()
end

def ready?
@ready.value
control_state.isReady()
end

def events_consumed
@worker_observer.getEventsConsumed()
end

def events_filtered
@worker_observer.getEventsFiltered()
end

def safe_pipeline_worker_count
Expand Down Expand Up @@ -141,8 +127,9 @@ def start

@logger.debug("Starting pipeline", default_logging_keys)

@finished_execution.make_false
@finished_run.make_false
# reset to base state (TODO: cleanup smell)
control_state.setFinishedExecution(false)
control_state.setFinishedRun(false)

@thread = Thread.new do
error_log_params = ->(e) {
Expand All @@ -155,7 +142,7 @@ def start
LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
ThreadContext.put("pipeline.id", pipeline_id)
run
@finished_run.make_true
control_state.setFinishedRun(true)
rescue => e
logger.error("Pipeline error", error_log_params.call(e))
ensure
Expand All @@ -166,7 +153,7 @@ def start
rescue => e
logger.error("Pipeline close error, ignoring", error_log_params.call(e))
end
@finished_execution.make_true
control_state.setFinishedExecution(true)
@logger.info("Pipeline terminated", "pipeline.id" => pipeline_id)
end
end
Expand All @@ -182,7 +169,7 @@ def start

def wait_until_started
while true do
if @finished_run.true?
if finished_run?
# it completed run without exception
return true
elsif thread.nil? || !thread.alive?
Expand Down Expand Up @@ -225,23 +212,23 @@ def run
end

def transition_to_running
@running.make_true
control_state.setRunning(true)
end

def transition_to_stopped
@running.make_false
control_state.setRunning(false)
end

def running?
@running.true?
control_state.isRunning()
end

def stopped?
@running.false?
!running?
end

def crashed?
@crash_detected.true?
control_state.isCrashDetected()
end

# register_plugins calls #register_plugin on the plugins list and upon exception will call Plugin#do_close on all registered plugins
Expand All @@ -259,9 +246,12 @@ def register_plugins(plugins)

def start_workers
@worker_threads.clear # In case we're restarting the pipeline
@outputs_registered.make_false
# @outputs_registered.make_false
begin
maybe_setup_out_plugins
worker_stage = lir_execution.worker_stage

register_plugins(worker_stage.outputs)
register_plugins(worker_stage.filters)

pipeline_workers = safe_pipeline_worker_count
@preserve_event_order = preserve_event_order?(pipeline_workers)
Expand Down Expand Up @@ -293,14 +283,12 @@ def start_workers
"pipeline.sources" => pipeline_source_details)
@logger.info("Starting pipeline", pipeline_log_params)

filter_queue_client.set_batch_dimensions(batch_size, batch_delay)

# First launch WorkerLoop initialization in separate threads which concurrently
# compiles and initializes the worker pipelines

workers_init_start = Time.now
worker_loops = pipeline_workers.times
.map { Thread.new { init_worker_loop } }
.map { Thread.new { init_worker_loop(worker_stage) } }
.map(&:value)
workers_init_elapsed = Time.now - workers_init_start

Expand All @@ -319,7 +307,7 @@ def start_workers
rescue => e
# WorkerLoop.run() catches all Java Exception class and re-throws as IllegalStateException with the
# original exception as the cause
@crash_detected.make_true
control_state.setCrashDetected(true)
@logger.error(
"Pipeline worker error, the pipeline will be stopped",
exception_logging_keys(e.cause)
Expand All @@ -334,7 +322,7 @@ def start_workers
begin
start_inputs
rescue => e
@crash_detected.make_true
control_state.setCrashDetected(true)
# if there is any exception in starting inputs, make sure we shutdown workers.
# exception will already by logged in start_inputs
shutdown_workers
Expand All @@ -343,7 +331,7 @@ def start_workers
ensure
# it is important to guarantee @ready to be true after the startup sequence has been completed
# to potentially unblock the shutdown method which may be waiting on @ready to proceed
@ready.make_true
control_state.setReady(true)
end
end

Expand Down Expand Up @@ -482,7 +470,7 @@ def stop_inputs
# tell the worker threads to stop and then block until they've fully stopped
# This also stops all filter and output plugins
def shutdown_workers
@shutdownRequested.set(true)
control_state.setShutdownRequested(true)

@worker_threads.each do |t|
@logger.debug("Shutdown waiting for worker thread", default_logging_keys(:thread => t.inspect))
Expand All @@ -506,7 +494,7 @@ def flush_filters(options = {}, &block)
def start_flusher
# Invariant to help detect improper initialization
raise "Attempted to start flusher on a stopped pipeline!" if stopped?
@flusher_thread = org.logstash.execution.PeriodicFlush.new(@flushRequested, @flushing)
@flusher_thread = control_state.createPeriodicFlush()
@flusher_thread.start
end

Expand Down Expand Up @@ -562,14 +550,14 @@ def inspect
{
:pipeline_id => pipeline_id,
:settings => settings.inspect,
:ready => @ready,
:running => @running,
:flushing => @flushing
:ready => control_state.isReady(),
:running => control_state.isRunning(),
:flushing => control_state.isFlushing(),
}
end

def shutdown_requested?
@shutdownRequested.get
control_state.isShutdownRequested()
end

def worker_threads_draining?
Expand All @@ -589,19 +577,13 @@ def close_plugin_and_ignore(plugin)
end

# @return [WorkerLoop] a new WorkerLoop instance or nil upon construction exception
def init_worker_loop
def init_worker_loop(worker_stage)
begin
org.logstash.execution.WorkerLoop.new(
filter_queue_client, # QueueReadClient
lir_execution, # CompiledPipeline
worker_stage, # CompiledPipeline.WorkerStage
@worker_observer, # WorkerObserver
# pipeline reporter counters
@events_consumed, # LongAdder
@events_filtered, # LongAdder
# signaling channels
@flushRequested, # AtomicBoolean
@flushing, # AtomicBoolean
@shutdownRequested, # AtomicBoolean
control_state, # PipelineControlState
# behaviour config pass-through
@drain_queue, # boolean
@preserve_event_order) # boolean
Expand All @@ -619,7 +601,7 @@ def init_worker_loop
end

def maybe_setup_out_plugins
if @outputs_registered.make_true
if control_state.claimOutputsRegistration()
register_plugins(outputs)
register_plugins(filters)
end
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
package org.logstash;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public record QueueReadClientSettings(
int batchSize,
int batchDelay
) {

public QueueReadClientSettings {
final List<String> errors = new ArrayList<>();

if (batchSize <= 0) {
errors.add("batchSize must be greater than 0");
}
if (batchDelay <= 0) {
errors.add("batchDelay must be greater than 0");
}

if (!errors.isEmpty()) {
throw new RuntimeException(String.format("Invalid settings: %s", errors));
}
}

public static QueueReadClientSettings build(Consumer<Builder> builderConsumer) {
final Builder builder = new Builder();
builderConsumer.accept(builder);
return builder.build();
}

public static class Builder {
int batchSize = 125;
int batchDelay = 50;

private Builder() {}

public Builder setBatchSize(int batchSize) {
this.batchSize = batchSize;
return this;
}

public Builder setBatchDelay(int batchDelay) {
this.batchDelay = batchDelay;
return this;
}

QueueReadClientSettings build() {
return new QueueReadClientSettings(batchSize, batchDelay);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
import org.logstash.Event;
import org.logstash.QueueReadClientSettings;
import org.logstash.RubyUtil;
import org.logstash.ackedqueue.ext.JRubyWrappedAckedQueueExt;
import org.logstash.common.SettingKeyDefinitions;
Expand Down Expand Up @@ -168,6 +169,14 @@ private static Settings extractQueueSettings(final IRubyObject settings) {
.build();
}

/**
 * Reads the pipeline batch size/delay out of the Ruby-side settings object and
 * packages them as an immutable {@link QueueReadClientSettings}.
 */
public static QueueReadClientSettings extractQueueReadClientSettings(final IRubyObject settings) {
    final ThreadContext context = settings.getRuntime().getCurrentContext();
    // Resolve both settings up front so the builder lambda stays trivial.
    final int batchSize = getSetting(context, settings, PIPELINE_BATCH_SIZE).toJava(Integer.class);
    final int batchDelay = getSetting(context, settings, PIPELINE_BATCH_DELAY).toJava(Integer.class);
    return QueueReadClientSettings.build(builder ->
        builder.setBatchSize(batchSize).setBatchDelay(batchDelay));
}

private static CompressionCodec.Factory extractConfiguredCodec(final IRubyObject settings) {
final ThreadContext context = settings.getRuntime().getCurrentContext();
final String compressionSetting = getSetting(context, settings, QUEUE_COMPRESSION).asJavaString();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@

import co.elastic.logstash.api.Metric;
import org.jruby.Ruby;
import org.jruby.RubyBoolean;
import org.jruby.RubyClass;
import org.jruby.api.Convert;
import org.jruby.anno.JRubyClass;
Expand Down Expand Up @@ -152,7 +151,7 @@ public IRubyObject rubyIsEmpty(ThreadContext context) {
}

@Override
protected JRubyAbstractQueueWriteClientExt getWriteClient(final ThreadContext context) {
protected JRubyAbstractQueueWriteClientExt getWriteClient() {
return JrubyAckedWriteClientExt.create(queue);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ public class SettingKeyDefinitions {

public static final String PIPELINE_BATCH_METRICS = "pipeline.batch.metrics.sampling_mode";

public static final String PIPELINE_BATCH_DELAY = "pipeline.batch.delay";

public static final String PIPELINE_BATCH_SIZE = "pipeline.batch.size";

public static final String PIPELINE_BATCH_OUTPUT_CHUNKING_GROWTH_THRESHOLD_FACTOR = "pipeline.batch.output_chunking.growth_threshold_factor";
Expand Down
Loading
Loading