2016-05-17 15:21:37 +00:00
|
|
|
require 'logstash/devutils/rspec/spec_helper'
require 'logstash/outputs/jdbc'
require 'stud/temporary'
require 'java'
require 'securerandom'
require 'bigdecimal'
|
|
|
|
|
2017-11-08 17:25:39 +00:00
|
|
|
# Raise RSpec's diff truncation limit so large SQL statements and event
# payloads appear in full in failure output.
RSpec::Support::ObjectFormatter.default_instance.max_formatted_output_length = 80000
|
|
|
|
|
2016-09-15 20:32:34 +00:00
|
|
|
RSpec.configure do |_config|
  # Build the shell command used to start or stop a system service.
  # Prefers systemd (`systemctl`) when it is on the PATH, falling back to
  # SysV init scripts under /etc/init.d. `action` is 'start' or 'stop'.
  # Extracted because start_service/stop_service were near-duplicates.
  def service_command(name, action)
    `which systemctl`
    if $?.success?
      "sudo systemctl #{action} #{name}"
    else
      "sudo /etc/init.d/#{name}* #{action}"
    end
  end

  # Start the named database service (used by the connection-loss test).
  def start_service(name)
    `#{service_command(name, 'start')}`
  end

  # Stop the named database service (used by the connection-loss test).
  def stop_service(name)
    `#{service_command(name, 'stop')}`
  end
end
|
|
|
|
|
2016-05-03 14:28:01 +00:00
|
|
|
# Sanity check shared by all integration specs: the JDBC driver jar must
# be pointed at by the environment variable named in `jdbc_jar_env`.
RSpec.shared_context 'rspec setup' do
  it 'ensure jar is available' do
    jar_path = ENV[jdbc_jar_env]
    expect(jar_path).not_to be_nil, "#{jdbc_jar_env} not defined, required to run tests"
    expect(File.exist?(jar_path)).to eq(true), "#{jdbc_jar_env} defined, but not valid"
  end
end
|
|
|
|
|
|
|
|
# Registration must fail loudly when no driver jar path is configured.
RSpec.shared_context 'when initializing' do
  it 'shouldn\'t register with a missing jar file' do
    jdbc_settings['driver_jar_path'] = nil
    output = LogStash::Plugin.lookup('output', 'jdbc').new(jdbc_settings)
    expect { output.register }.to raise_error(LogStash::ConfigurationError)
  end
end
|
|
|
|
|
|
|
|
RSpec.shared_context 'when outputting messages' do
|
2016-09-02 18:01:28 +00:00
|
|
|
# Mock logger injected into the plugin so examples can assert on output.
let(:logger) { double("logger") }
|
2016-06-29 17:48:12 +00:00
|
|
|
|
2016-05-17 15:21:37 +00:00
|
|
|
# Name of the scratch table created and dropped around each example.
let(:jdbc_test_table) { 'logstash_output_jdbc_test' }
|
|
|
|
|
|
|
|
# NOTE(review): a duplicate `let(:jdbc_drop_table)` was removed from here.
# It was silently overridden by the later definition of the same name, so
# removing it does not change behavior.
|
|
|
|
|
2017-11-08 17:25:39 +00:00
|
|
|
# Column definitions driving table creation, the INSERT statement, and the
# event-field mapping. Each entry: DB column name, SQL type, SQL
# placeholder, and the logstash event field supplying the value.
let(:jdbc_statement_fields) do
  [
    { db_field: 'created_at',      db_type: 'datetime',     db_value: '?', event_field: '@timestamp' },
    { db_field: 'message',         db_type: 'varchar(512)', db_value: '?', event_field: 'message' },
    { db_field: 'message_sprintf', db_type: 'varchar(512)', db_value: '?', event_field: 'sprintf-%{message}' },
    { db_field: 'static_int',      db_type: 'int',          db_value: '?', event_field: 'int' },
    { db_field: 'static_bigint',   db_type: 'bigint',       db_value: '?', event_field: 'bigint' },
    { db_field: 'static_float',    db_type: 'float',        db_value: '?', event_field: 'float' },
    { db_field: 'static_bool',     db_type: 'boolean',      db_value: '?', event_field: 'bool' },
    { db_field: 'static_bigdec',   db_type: 'decimal',      db_value: '?', event_field: 'bigdec' }
  ]
end
|
|
|
|
|
2016-05-03 14:28:01 +00:00
|
|
|
# CREATE TABLE statement assembled from jdbc_statement_fields.
let(:jdbc_create_table) do
  column_defs = jdbc_statement_fields
                .map { |field| "#{field[:db_field]} #{field[:db_type]} not null" }
                .join(', ')
  "CREATE table #{jdbc_test_table} (#{column_defs})"
end
|
|
|
|
|
|
|
|
# DROP statement used for per-example cleanup. Derby has no
# `DROP TABLE IF EXISTS`, so callers rescue drop failures instead.
let(:jdbc_drop_table) { "DROP table #{jdbc_test_table}" }
|
|
|
|
|
|
|
|
# INSERT statement in the plugin's configuration format: the SQL string
# first, followed by the event fields bound to each `?` placeholder.
let(:jdbc_statement) do
  columns      = jdbc_statement_fields.map { |field| field[:db_field].to_s }.join(', ')
  placeholders = jdbc_statement_fields.map { |field| field[:db_value].to_s }.join(', ')
  bound_fields = jdbc_statement_fields.map { |field| field[:event_field] }
  ["insert into #{jdbc_test_table} (#{columns}) values(#{placeholders})", *bound_fields]
end
|
|
|
|
|
|
|
|
# Name of the system service backing the database under test, or nil when
# it does not run as a service (the restart/retry example is then skipped).
let(:systemd_database_service) { nil }
|
|
|
|
|
2017-11-08 17:25:39 +00:00
|
|
|
# Sample event carrying one value per column in jdbc_statement_fields.
let(:event) do
  # TODO: Auto generate fields from jdbc_statement_fields
  LogStash::Event.new({
    message: "test-message #{SecureRandom.uuid}",
    float: 12.1,
    bigint: 4000881632477184,
    bool: true,
    int: 1,
    # BigDecimal.new was deprecated in Ruby 2.5 and removed in 2.7;
    # use the Kernel#BigDecimal conversion method instead.
    bigdec: BigDecimal("123.123")
  })
end
|
|
|
|
|
2016-05-17 15:21:37 +00:00
|
|
|
# Fully registered output plugin under test, with the mock logger wired in
# and noisy startup/reflection logging suppressed.
let(:plugin) do
  # Setup logger: the plugin class reports our double as its logger.
  allow(LogStash::Outputs::Jdbc).to receive(:logger).and_return(logger)

  # XXX: Suppress reflection logging. There has to be a better way around this.
  allow(logger).to receive(:debug).with(/config LogStash::/)

  # Suppress beta warnings.
  allow(logger).to receive(:info).with(/Please let us know if you find bugs or have suggestions on how to improve this plugin./)

  # Suppress start up messages (and require that registration logs exactly once).
  expect(logger).to receive(:info).once.with(/JDBC - Starting up/)

  # Setup plugin: look up the output class, configure, and register it.
  output = LogStash::Plugin.lookup('output', 'jdbc').new(jdbc_settings)
  output.register

  output
end
|
|
|
|
|
|
|
|
before :each do
  # Setup table: borrow a raw JDBC connection from the plugin's pool.
  c = plugin.instance_variable_get(:@pool).getConnection

  # Derby doesn't support IF EXISTS.
  # Seems like the quickest solution. Bleurgh.
  begin
    stmt = c.createStatement
    stmt.executeUpdate(jdbc_drop_table)
  rescue
    # noop — the table may simply not exist yet; dropping is best-effort.
  ensure
    # NOTE(review): if createStatement itself raised, `stmt` would be nil
    # here and `stmt.close` would fail — confirm that cannot happen.
    stmt.close

    # The table is (re)created inside `ensure` so it runs whether or not
    # the drop above succeeded.
    stmt = c.createStatement
    stmt.executeUpdate(jdbc_create_table)
    stmt.close
    c.close
  end
end
|
2016-05-03 14:28:01 +00:00
|
|
|
|
2017-11-08 17:25:39 +00:00
|
|
|
# Drop the scratch table once each example has finished.
after :each do
  connection = plugin.instance_variable_get(:@pool).getConnection
  statement = connection.createStatement
  statement.executeUpdate(jdbc_drop_table)
  statement.close
  connection.close
end
|
2016-05-03 14:28:01 +00:00
|
|
|
|
|
|
|
it 'should save a event' do
  expect { plugin.multi_receive([event]) }.to_not raise_error

  # Verify the number of items in the output table
  # TODO replace this simple count with a check of the actual contents
  connection = plugin.instance_variable_get(:@pool).getConnection
  statement = connection.prepareStatement(
    "select count(*) as total from #{jdbc_test_table} where message = ?"
  )
  statement.setString(1, event.get('message'))
  result_set = statement.executeQuery
  count = 0
  while result_set.next
    count = result_set.getInt('total')
  end
  statement.close
  connection.close

  expect(count).to eq(1)
end
|
2016-06-29 17:48:12 +00:00
|
|
|
|
|
|
|
it 'should not save event, and log an unretryable exception' do
  # Make the event invalid (message column is NOT NULL), remembering the
  # original value so the shared event can be restored afterwards.
  broken_event = event
  saved_message = broken_event.get('message')
  broken_event.set('message', nil)

  expect(logger).to receive(:error).once.with(/JDBC - Exception. Not retrying/, Hash)
  expect { plugin.multi_receive([broken_event]) }.to_not raise_error

  broken_event.set('message', saved_message)
end
|
|
|
|
|
|
|
|
it 'it should retry after a connection loss, and log a warning' do
  skip "does not run as a service, or known issue with test" if systemd_database_service.nil?

  p = plugin

  # Check that everything is fine right now
  expect { p.multi_receive([event]) }.not_to raise_error

  stop_service(systemd_database_service)

  # Start a thread to restart the service after the fact.
  # FIX: the block parameter previously shadowed the outer
  # `systemd_database_service` local; it now has a distinct name.
  restarter = Thread.new(systemd_database_service) do |service_name|
    sleep 20
    start_service(service_name)
  end

  restarter.run

  expect(logger).to receive(:warn).at_least(:once).with(/JDBC - Exception. Retrying/, Hash)
  expect { p.multi_receive([event]) }.to_not raise_error

  # Wait for the restart thread to finish before moving on.
  restarter.join
end
|
2016-05-03 14:28:01 +00:00
|
|
|
end
|