# encoding: utf-8
require 'logstash/outputs/base'
require 'logstash/namespace'
require 'concurrent'
require 'stud/interval'
require 'java'
require 'logstash-output-jdbc_jars'
require 'json'

# Write events to a SQL engine, using JDBC.
#
# It is up to the user of the plugin to configure it correctly. This
# includes correctly crafting the SQL statement and matching the number of
# parameters to the statement's placeholders.
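#
# An illustrative (not prescriptive) configuration might look like the
# following; the connection string, table and column names are assumptions
# for the example only:
#
#     output {
#       jdbc {
#         connection_string => "jdbc:mysql://localhost/logdb?user=logstash&password=secret"
#         statement => [ "INSERT INTO log (host, message) VALUES(?, ?)", "host", "message" ]
#       }
#     }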
class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  concurrency :shared

  STRFTIME_FMT = '%Y-%m-%d %T.%L'.freeze

  RETRYABLE_SQLSTATE_CLASSES = [
    # Classes of retryable SQLSTATE codes
    # Not all in the class will be retryable. However, this is the best that
    # we've got right now.
    # If a custom state code is required, set it in retry_sql_states.
    '08', # Connection Exception
    '24', # Invalid Cursor State (Maybe retry-able in some circumstances)
    '25', # Invalid Transaction State
    '40', # Transaction Rollback
    '53', # Insufficient Resources
    '54', # Program Limit Exceeded (MAYBE)
    '55', # Object Not In Prerequisite State
    '57', # Operator Intervention
    '58', # System Error
  ].freeze
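  # A full SQLSTATE is five characters; the first two identify its class,
  # so e.g. '08006' (connection failure) falls under class '08' above.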

  config_name 'jdbc'

  # Driver class - Reintroduced for https://github.com/theangryangel/logstash-output-jdbc/issues/26
  config :driver_class, validate: :string

  # Does the JDBC driver support autocommit?
  config :driver_auto_commit, validate: :boolean, default: true, required: true

  # Where to find the jar
  # Defaults to not required, and to the original behaviour
  config :driver_jar_path, validate: :string, required: false

  # jdbc connection string
  config :connection_string, validate: :string, required: true

  # jdbc username - optional, may be in the connection string
  config :username, validate: :string, required: false

  # jdbc password - optional, may be in the connection string
  config :password, validate: :string, required: false

  # [ "insert into table (message) values(?)", "%{message}" ]
  config :statement, validate: :array, required: true

  # If this is an unsafe statement, use event.sprintf
  # This also has potential performance penalties due to having to create a
  # new statement for each event, rather than adding to the batch and issuing
  # multiple inserts in one go.
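  #
  # Purely as an illustration (the table and field names are assumptions),
  # an unsafe statement interpolates event data straight into the SQL via
  # event.sprintf:
  #
  #     unsafe_statement => true
  #     statement => [ "INSERT INTO log (message) VALUES('%{message}')" ]
  #
  # Prefer the parameterised form wherever possible.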
  config :unsafe_statement, validate: :boolean, default: false

  # Number of connections in the pool to maintain
  config :max_pool_size, validate: :number, default: 24

  # Connection timeout, in milliseconds
  config :connection_timeout, validate: :number, default: 10000

  # We buffer a certain number of events before flushing that out to SQL.
  # This setting controls how many events will be buffered before sending a
  # batch of events.
  config :flush_size, validate: :number, default: 1000

  # Set initial interval in seconds between retries. Doubled on each retry up to `retry_max_interval`
  config :retry_initial_interval, validate: :number, default: 2

  # Maximum time between retries, in seconds
  config :retry_max_interval, validate: :number, default: 128
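  # With the defaults this yields sleeps of roughly 2, 4, 8, 16, 32, 64,
  # 128, 128, ... seconds between successive retries.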

  # Any additional custom, retryable SQL state codes.
  # Suitable for configuring retryable custom JDBC SQL state codes.
  config :retry_sql_states, validate: :array, default: []

  # Run a connection test on start.
  config :connection_test, validate: :boolean, default: true

  # Connection test and init string, required for some JDBC endpoints,
  # notably phoenix-thin - see logstash-output-jdbc issue #60
  config :connection_test_query, validate: :string, required: false

  # Maximum number of sequential failed attempts before we stop retrying.
  # If set to < 1, it will retry indefinitely.
  # At the default values this is a little over 10 minutes.
  config :max_flush_exceptions, validate: :number, default: 10

  config :max_repeat_exceptions, obsolete: 'This has been replaced by max_flush_exceptions - which behaves slightly differently. Please check the documentation.'
  config :max_repeat_exceptions_time, obsolete: 'This is no longer required'
  config :idle_flush_time, obsolete: 'No longer necessary under Logstash v5'

  def register
    @logger.info('JDBC - Starting up')

    load_jar_files!

    @stopping = Concurrent::AtomicBoolean.new(false)

    @logger.warn('JDBC - Flush size is set to > 1000') if @flush_size > 1000

    if @statement.empty?
      @logger.error('JDBC - No statement provided. Configuration error.')
    end

    if !@unsafe_statement && @statement.length < 2
      @logger.error("JDBC - Statement has no parameters. No events will be inserted into SQL as you're not passing any event data. Likely configuration error.")
    end

    setup_and_test_pool!
  end

  def multi_receive(events)
    events.each_slice(@flush_size) do |slice|
      retrying_submit(slice)
    end
  end

  def close
    @stopping.make_true
    @pool.close
    super
  end

  private

  def setup_and_test_pool!
    # Setup pool
    @pool = Java::ComZaxxerHikari::HikariDataSource.new
    @pool.setAutoCommit(@driver_auto_commit)
    @pool.setDriverClassName(@driver_class) if @driver_class
    @pool.setJdbcUrl(@connection_string)
    @pool.setUsername(@username) if @username
    @pool.setPassword(@password) if @password
    @pool.setMaximumPoolSize(@max_pool_size)
    @pool.setConnectionTimeout(@connection_timeout)

    # Connection#isValid takes a timeout in seconds; @connection_timeout is in milliseconds
    validate_connection_timeout = (@connection_timeout / 1000) / 2

    if !@connection_test_query.nil? and @connection_test_query.length > 1
      @pool.setConnectionTestQuery(@connection_test_query)
      @pool.setConnectionInitSql(@connection_test_query)
    end

    return unless @connection_test

    # Test connection
    test_connection = @pool.getConnection
    unless test_connection.isValid(validate_connection_timeout)
      @logger.warn('JDBC - Connection is not reporting as valid. Either the connection is invalid, or the driver is not returning the appropriate response.')
    end
    test_connection.close
  end

  def load_jar_files!
    # Load jar from driver path
    unless @driver_jar_path.nil?
      raise LogStash::ConfigurationError, 'JDBC - Could not find jar file at given path. Check config.' unless File.exist? @driver_jar_path
      require @driver_jar_path
      return
    end

    # Revert to original behaviour of loading from the vendor directory
    # if no path given
    jarpath = if ENV['LOGSTASH_HOME']
                File.join(ENV['LOGSTASH_HOME'], '/vendor/jar/jdbc/*.jar')
              else
                File.join(File.dirname(__FILE__), '../../../vendor/jar/jdbc/*.jar')
              end

    @logger.trace('JDBC - jarpath', path: jarpath)

    jars = Dir[jarpath]
    raise LogStash::ConfigurationError, 'JDBC - No jars found. Have you read the README?' if jars.empty?

    jars.each do |jar|
      @logger.trace('JDBC - Loaded jar', jar: jar)
      require jar
    end
  end
2016-05-17 10:29:49 +00:00
def submit ( events )
2016-04-16 13:33:30 +00:00
connection = nil
statement = nil
2016-05-14 21:17:34 +00:00
events_to_retry = [ ]
2016-05-17 15:21:37 +00:00
2016-04-16 13:33:30 +00:00
begin
2016-05-17 15:21:37 +00:00
connection = @pool . getConnection
2016-05-14 21:17:34 +00:00
rescue = > e
2016-06-29 12:52:29 +00:00
log_jdbc_exception ( e , true )
2016-05-27 11:09:48 +00:00
# If a connection is not available, then the server has gone away
# We're not counting that towards our retry count.
return events , false
2016-05-14 21:17:34 +00:00
end
2015-11-15 12:35:57 +00:00
2016-05-14 21:17:34 +00:00
events . each do | event |
begin
2016-05-17 15:21:37 +00:00
statement = connection . prepareStatement (
( @unsafe_statement == true ) ? event . sprintf ( @statement [ 0 ] ) : @statement [ 0 ]
)
2016-04-16 13:33:30 +00:00
statement = add_statement_event_params ( statement , event ) if @statement . length > 1
2016-05-17 15:21:37 +00:00
statement . execute
2016-05-20 15:30:48 +00:00
rescue = > e
2016-05-27 11:09:48 +00:00
if retry_exception? ( e )
2016-05-17 10:29:49 +00:00
events_to_retry . push ( event )
end
2016-05-14 21:17:34 +00:00
ensure
2016-05-17 15:21:37 +00:00
statement . close unless statement . nil?
2016-05-14 21:17:34 +00:00
end
end
2016-05-17 15:21:37 +00:00
connection . close unless connection . nil?
2016-05-27 11:09:48 +00:00
return events_to_retry , true
2016-05-14 21:17:34 +00:00
end

  def retrying_submit(actions)
    # Initially we submit the full list of actions
    submit_actions = actions
    count_as_attempt = true

    attempts = 1
    sleep_interval = @retry_initial_interval

    while @stopping.false? and (submit_actions and !submit_actions.empty?)
      return if !submit_actions || submit_actions.empty? # If everything's a success we move along
      # We retry whatever didn't succeed
      submit_actions, count_as_attempt = submit(submit_actions)

      # Everything was a success!
      break if !submit_actions || submit_actions.empty?

      if @max_flush_exceptions > 0 and count_as_attempt == true
        attempts += 1

        if attempts > @max_flush_exceptions
          @logger.error("JDBC - max_flush_exceptions has been reached. #{submit_actions.length} events have been unable to be sent to SQL and are being dropped. See previously logged exceptions for details.")
          break
        end
      end

      # If we're retrying the action, sleep for the recommended interval and
      # double the interval for the next time through to achieve exponential backoff
      Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
      sleep_interval = next_sleep_interval(sleep_interval)
    end
  end
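
  # Binds each element of @statement after the first to the corresponding
  # '?' placeholder in the prepared statement. For example (field names are
  # illustrative only):
  #   statement => [ "INSERT INTO log (host, message) VALUES(?, ?)", "host", "message" ]
  # binds event.get('host') to the first placeholder and event.get('message')
  # to the second.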
  def add_statement_event_params(statement, event)
    @statement[1..-1].each_with_index do |i, idx|
      if i.is_a? String
        value = event.get(i)
        if value.nil? and i =~ /%\{/
          value = event.sprintf(i)
        end
      else
        value = i
      end

      case value
      when Time
        # See LogStash::Timestamp, below, for the why behind strftime.
        statement.setString(idx + 1, value.strftime(STRFTIME_FMT))
      when LogStash::Timestamp
        # XXX: Using setString as opposed to setTimestamp, because setTimestamp
        # doesn't behave correctly in some drivers (Known: sqlite)
        #
        # Additionally this does not use `to_iso8601`, since some SQL databases
        # choke on the 'T' in the string (Known: Derby).
        #
        # strftime appears to be the most reliable across drivers.
        statement.setString(idx + 1, value.time.strftime(STRFTIME_FMT))
      when Fixnum, Integer
        if value > 2147483647 or value < -2147483648
          statement.setLong(idx + 1, value)
        else
          statement.setInt(idx + 1, value)
        end
      when Float
        statement.setFloat(idx + 1, value)
      when String
        statement.setString(idx + 1, value)
      when Array, Hash
        statement.setString(idx + 1, value.to_json)
      when true, false
        statement.setBoolean(idx + 1, value)
      else
        statement.setString(idx + 1, nil)
      end
    end
    statement
  end

  def retry_exception?(exception)
    retrying = (exception.respond_to? 'getSQLState' and (RETRYABLE_SQLSTATE_CLASSES.include?(exception.getSQLState.to_s[0, 2]) or @retry_sql_states.include?(exception.getSQLState)))
    log_jdbc_exception(exception, retrying)
    retrying
  end

  def log_jdbc_exception(exception, retrying)
    current_exception = exception
    log_text = 'JDBC - Exception. ' + (retrying ? 'Retrying' : 'Not retrying') + '.'
    log_method = (retrying ? 'warn' : 'error')

    loop do
      @logger.send(log_method, log_text, :exception => current_exception)

      if current_exception.respond_to? 'getNextException'
        current_exception = current_exception.getNextException()
      else
        current_exception = nil
      end

      break if current_exception == nil
    end
  end

  def next_sleep_interval(current_interval)
    doubled = current_interval * 2
    doubled > @retry_max_interval ? @retry_max_interval : doubled
  end
end # class LogStash::Outputs::Jdbc