logstash-output-jdbc/lib/logstash/outputs/jdbc.rb

# encoding: utf-8
require 'logstash/outputs/base'
require 'logstash/namespace'
require 'concurrent'
require 'stud/interval'
require 'java'
require 'logstash-output-jdbc_jars'
# Write events to a SQL engine, using JDBC.
#
# It is up to the user of the plugin to configure it correctly. This
# includes correctly crafting the SQL statement, and matching the number of
# parameters to the number of placeholders.
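#
# A minimal, illustrative configuration (the jar path, connection string,
# and table/column names below are hypothetical; adjust them for your
# database and driver):
#
#     output {
#       jdbc {
#         driver_jar_path   => "/path/to/driver.jar"
#         connection_string => "jdbc:mysql://localhost/logs?user=logstash&password=secret"
#         statement         => [ "insert into log (host, message) values(?, ?)", "host", "message" ]
#       }
#     }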
class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
STRFTIME_FMT = '%Y-%m-%d %T.%L'.freeze
# SQL states that can never succeed on retry, because the event would
# duplicate data that is already present (i.e. duplicate keys).
# These log a warning, but are not retried.
# (SQLState values are five-character strings, per java.sql.SQLException#getSQLState.)
SQL_STATES_IGNORE = [
### Class: Unqualified Successful Completion
# Success. This shouldn't get thrown, but JDBC driver quality varies, so who knows.
'00000',
### Class: Constraint Violation
# Integrity constraint violation.
'23000',
# A violation of the constraint imposed by a unique index or a unique constraint occurred.
'23505'
].freeze
# Will log an error, but not retry.
SQL_STATES_FATAL = [
### Class: Data Exception
# Character data, right truncation occurred. Field too small.
'22001',
# Numeric value out of range.
'22003',
# A null value is not allowed.
'22004',
# Invalid datetime format.
'22007',
# A parameter or host variable value is invalid.
'22023',
# Character conversion resulted in truncation.
'22524',
### Class: Constraint Violation
# The insert or update value of a foreign key is invalid.
'23503',
# The range of values for the identity column or sequence is exhausted.
'23522'
].freeze
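# Any other SQLState (and any other exception raised while executing a
# statement) is treated as potentially transient and is retried with
# exponential backoff; see retrying_submit below.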
config_name 'jdbc'
# Driver class - Reintroduced for https://github.com/theangryangel/logstash-output-jdbc/issues/26
config :driver_class, validate: :string
# Does the JDBC driver support autocommit?
config :driver_auto_commit, validate: :boolean, default: true, required: true
# Where to find the JDBC driver jar.
# Optional; if not set, jars are loaded from the vendor directory
# (the original behaviour).
config :driver_jar_path, validate: :string, required: false
# JDBC connection string
config :connection_string, validate: :string, required: true
# JDBC username - optional, as it may be included in the connection string
config :username, validate: :string, required: false
# JDBC password - optional, as it may be included in the connection string
config :password, validate: :string, required: false
# [ "insert into table (message) values(?)", "%{message}" ]
config :statement, validate: :array, required: true
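# A further illustrative example (table and column names are hypothetical),
# binding several event fields to placeholders:
#   [ "insert into log (host, created_at, message) values(?, ?, ?)",
#     "host", "@timestamp", "message" ]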
# If this is an unsafe statement, use event.sprintf on the statement itself.
# This also has potential performance penalties, due to having to create a
# new statement for each event, rather than adding to the batch and issuing
# multiple inserts in one go.
config :unsafe_statement, validate: :boolean, default: false
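# Illustrative only (the table_name field is hypothetical): with
# unsafe_statement enabled, the statement itself is passed through
# event.sprintf, so it may contain patterns such as
#   [ "insert into %{table_name} (message) values(?)", "%{message}" ]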
# Number of connections in the pool to maintain
config :max_pool_size, validate: :number, default: 5
# Connection timeout, in milliseconds
config :connection_timeout, validate: :number, default: 10000
# We buffer a certain number of events before flushing that out to SQL.
# This setting controls how many events will be buffered before sending a
# batch of events.
config :flush_size, validate: :number, default: 1000
# Set initial interval in seconds between retries. Doubled on each retry up to `retry_max_interval`
config :retry_initial_interval, validate: :number, default: 2
# Maximum time between retries, in seconds
config :retry_max_interval, validate: :number, default: 128
# Maximum number of sequential failed attempts before we stop retrying.
# If set to < 1, it will retry indefinitely.
# At the default values this is a little over 10 minutes.
config :max_flush_exceptions, validate: :number, default: 10
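# Rough arithmetic behind the note above, assuming the defaults
# (retry_initial_interval 2, retry_max_interval 128, max_flush_exceptions 10):
# the sleeps between attempts are 2 + 4 + 8 + 16 + 32 + 64 + 128 + 128 + 128 + 128
# = 638 seconds, i.e. a little over 10 minutes before the batch is dropped.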
config :max_repeat_exceptions, obsolete: 'This has been replaced by max_flush_exceptions - which behaves slightly differently. Please check the documentation.'
config :max_repeat_exceptions_time, obsolete: 'This is no longer required'
config :idle_flush_time, obsolete: 'No longer necessary under Logstash v5'
def register
@logger.info('JDBC - Starting up')
LogStash::Logger.setup_log4j(@logger)
load_jar_files!
@stopping = Concurrent::AtomicBoolean.new(false)
@logger.warn('JDBC - Flush size is set to > 1000') if @flush_size > 1000
if @statement.empty?
@logger.error('JDBC - No statement provided. Configuration error.')
end
if !@unsafe_statement && @statement.length < 2
@logger.error("JDBC - Statement has no parameters. No events will be inserted into SQL as you're not passing any event data. Likely configuration error.")
end
setup_and_test_pool!
end
def multi_receive(events)
events.each_slice(@flush_size) do |slice|
retrying_submit(slice)
end
end
def teardown
@pool.close
super
end
def close
@stopping.make_true
end
private
def setup_and_test_pool!
# Setup pool
@pool = Java::ComZaxxerHikari::HikariDataSource.new
@pool.setAutoCommit(@driver_auto_commit)
@pool.setDriverClassName(@driver_class) if @driver_class
@pool.setJdbcUrl(@connection_string)
@pool.setUsername(@username) if @username
@pool.setPassword(@password) if @password
@pool.setMaximumPoolSize(@max_pool_size)
@pool.setConnectionTimeout(@connection_timeout)
# Connection#isValid takes a timeout in seconds; @connection_timeout is in milliseconds.
validate_connection_timeout = (@connection_timeout / 1000) / 2
# Test connection
test_connection = @pool.getConnection
unless test_connection.isValid(validate_connection_timeout)
@logger.error('JDBC - Connection is not valid. Please check connection string or that your JDBC endpoint is available.')
end
test_connection.close
end
def load_jar_files!
# Load jar from driver path
unless @driver_jar_path.nil?
raise LogStash::ConfigurationError, 'JDBC - Could not find jar file at given path. Check config.' unless File.exist? @driver_jar_path
require @driver_jar_path
return
end
# Revert original behaviour of loading from vendor directory
# if no path given
jarpath = if ENV['LOGSTASH_HOME']
File.join(ENV['LOGSTASH_HOME'], '/vendor/jar/jdbc/*.jar')
else
File.join(File.dirname(__FILE__), '../../../vendor/jar/jdbc/*.jar')
end
@logger.debug('JDBC - jarpath', path: jarpath)
jars = Dir[jarpath]
raise LogStash::ConfigurationError, 'JDBC - No jars found. Have you read the README?' if jars.empty?
jars.each do |jar|
@logger.debug('JDBC - Loaded jar', jar: jar)
require jar
end
end
def submit(events)
connection = nil
statement = nil
events_to_retry = []
begin
connection = @pool.getConnection
rescue => e
log_jdbc_exception(e)
return events
end
events.each do |event|
begin
statement = connection.prepareStatement(
(@unsafe_statement == true) ? event.sprintf(@statement[0]) : @statement[0]
)
statement = add_statement_event_params(statement, event) if @statement.length > 1
statement.execute
rescue java.sql.SQLException => e
if SQL_STATES_IGNORE.include? e.getSQLState
@logger.warn('JDBC - Dropping event. Ignore-able exception (duplicate key most likely)', exception: e, event: event)
elsif SQL_STATES_FATAL.include? e.getSQLState
@logger.error('JDBC - Fatal SQL exception. Can never succeed. Dropping event.', exception: e, event: event)
else
log_jdbc_exception(e)
events_to_retry.push(event)
end
rescue => e
# Something else happened.
log_jdbc_exception(e)
events_to_retry.push(event)
ensure
statement.close unless statement.nil?
end
end
connection.close unless connection.nil?
events_to_retry
end
def retrying_submit(actions)
# Initially we submit the full list of actions
submit_actions = actions
attempts = 0
sleep_interval = @retry_initial_interval
while submit_actions && !submit_actions.empty?
# We retry whatever didn't succeed
submit_actions = submit(submit_actions)
# Everything was a success!
break if !submit_actions || submit_actions.empty?
if @max_flush_exceptions > 0
attempts += 1
if attempts > @max_flush_exceptions
@logger.error("JDBC - max_flush_exceptions has been reached. #{submit_actions.length} events have been unable to be sent to SQL and are being dropped. See previously logged exceptions for details.")
break
end
end
# If we're retrying the action, sleep for the recommended interval.
# Double the interval for the next time through to achieve exponential backoff
Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
sleep_interval = next_sleep_interval(sleep_interval)
end
end
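# Binds each remaining entry of @statement (an event field name, or a sprintf
# pattern) to its corresponding '?' placeholder, using a JDBC setter suited
# to the value's type.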
def add_statement_event_params(statement, event)
@statement[1..-1].each_with_index do |i, idx|
case event.get(i)
when Time
# See LogStash::Timestamp, below, for the why behind strftime.
statement.setString(idx + 1, event.get(i).strftime(STRFTIME_FMT))
when LogStash::Timestamp
# XXX: Using setString as opposed to setTimestamp, because setTimestamp
# doesn't behave correctly in some drivers (Known: sqlite)
#
# Additionally this does not use `to_iso8601`, since some SQL databases
# choke on the 'T' in the string (Known: Derby).
#
# strftime appears to be the most reliable across drivers.
statement.setString(idx + 1, event.get(i).time.strftime(STRFTIME_FMT))
when Fixnum, Integer
statement.setInt(idx + 1, event.get(i))
when Float
statement.setFloat(idx + 1, event.get(i))
when String
statement.setString(idx + 1, event.get(i))
when true, false
statement.setBoolean(idx + 1, event.get(i))
else
if event.get(i).nil? && i =~ /%\{/
statement.setString(idx + 1, event.sprintf(i))
else
statement.setString(idx + 1, nil)
end
end
end
statement
end
def log_jdbc_exception(exception)
current_exception = exception
loop do
@logger.warn('JDBC Exception encountered: Will automatically retry.', exception: current_exception)
current_exception = current_exception.getNextException
break if current_exception.nil?
end
end
def next_sleep_interval(current_interval)
doubled = current_interval * 2
doubled > @retry_max_interval ? @retry_max_interval : doubled
end
end # class LogStash::Outputs::Jdbc