Compare commits
62 Commits
| SHA1 |
|---|
| 6bae1d81e3 |
| 667e066d74 |
| 8e15bc5f45 |
| 43142287de |
| 2a6f048fa0 |
| 318c1bd86a |
| 3085606eb7 |
| f0d88a237f |
| ca1c71ea68 |
| cb4cefdfad |
| 44e1947f31 |
| 64a6bcfd55 |
| 238ef153e4 |
| 3bdd8ef3a8 |
| 9164605aae |
| d2f99b05d2 |
| c110bdd551 |
| b3a6de6340 |
| 37631a62b7 |
| 76e0f439a0 |
| 43eb5d969d |
| 0f37792177 |
| 0e2e883cd1 |
| 34708157f4 |
| 6c852d21dc |
| 53eaee001d |
| fe131f750e |
| f04e00019b |
| 867fd37805 |
| b3e8d1a0f8 |
| 25d14f2624 |
| ab566ee969 |
| 7d699e400c |
| 542003e4e5 |
| 290aa63d2d |
| a61dd21046 |
| 4a573cb599 |
| 80560f6692 |
| 2c00c5d016 |
| a26b6106d4 |
| d6869f594c |
| 5ec985b0df |
| 5a1fdb7c7f |
| b14d61ccf0 |
| baaeba3c07 |
| d362e791e5 |
| 5f0f897114 |
| 721e128f29 |
| fe2e23ac27 |
| 85b3f31051 |
| e32b6e9bbd |
| f1202f6454 |
| 26a32079f1 |
| 8d27e0f90d |
| df811f3d29 |
| d056093ab8 |
| e83af287f0 |
| 0ff6f16ec7 |
| e6e9ac3b04 |
| 707c005979 |
| 8f5ceb451a |
| e6537d053f |
.github/ISSUE_TEMPLATE.md (new file, 26 lines, vendored)

```
@@ -0,0 +1,26 @@
<!--
Trouble installing the plugin under Logstash 2.4.0 with the message "duplicate gems"? See https://github.com/elastic/logstash/issues/5852

Please remember:
- I have not used every database engine in the world
- I have not got access to every database engine in the world
- Any support I provide is done in my own personal time which is limited
- Understand that I won't always have the answer immediately

Please provide as much information as possible.
-->

<!--- Provide a general summary of the issue in the Title above -->

## Expected & Actual Behavior
<!--- If you're describing a bug, tell us what should happen, and what is actually happening, and if necessary how to reproduce it -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version of plugin used:
* Version of Logstash used:
* Database engine & version you're connecting to:
* Have you checked you've met the Logstash requirements for Java versions?:
```
.rubocop.yml (new file, 25 lines)

```
@@ -0,0 +1,25 @@
# I don't care for underscores in numbers.
Style/NumericLiterals:
  Enabled: false

Style/ClassAndModuleChildren:
  Enabled: false

Metrics/AbcSize:
  Enabled: false

Metrics/CyclomaticComplexity:
  Max: 9

Metrics/PerceivedComplexity:
  Max: 10

Metrics/LineLength:
  Enabled: false

Metrics/MethodLength:
  Max: 50

Style/FileName:
  Exclude:
    - 'lib/logstash-output-jdbc_jars.rb'
```
```
@@ -2,7 +2,9 @@ sudo: required
language: ruby
cache: bundler
rvm:
  - jruby
  - jruby-1.7.25
jdk:
  - oraclejdk8
before_script:
  - bundle exec rake vendor
  - bundle exec rake install_jars
```
CHANGELOG.md (10 changed lines)

```
@@ -1,6 +1,16 @@
# Change Log
All notable changes to this project will be documented in this file, from 0.2.0.

## [5.1.0] - 2016-12-17
- phoenix-thin fixes for issue #60

## [5.0.0] - 2016-11-03
- logstash v5 support

## [0.3.1] - 2016-08-28
- Adds connection_test configuration option, to prevent the connection test from occurring, allowing the error to be suppressed.
  Useful for cockroachdb deployments. https://github.com/theangryangel/logstash-output-jdbc/issues/53

## [0.3.0] - 2016-07-24
- Brings tests from v5 branch, providing greater coverage
- Removes bulk update support, due to inconsistent behaviour
```
```
@@ -21,7 +21,7 @@ See CHANGELOG.md
Released versions are available via rubygems, and typically tagged.

For development:
- See master branch for logstash v5 (currently **development only**)
- See master branch for logstash v5
- See v2.x branch for logstash v2
- See v1.5 branch for logstash v1.5
- See v1.4 branch for logstash 1.4
```
```
@@ -43,11 +43,13 @@ For development:
| driver_auto_commit | Boolean | If the driver does not support auto commit, you should set this to false | No | True |
| driver_jar_path | String | File path to jar file containing your JDBC driver. This is optional, and all JDBC jars may be placed in $LOGSTASH_HOME/vendor/jar/jdbc instead. | No | |
| connection_string | String | JDBC connection URL | Yes | |
| connection_test | Boolean | Run a JDBC connection test. Some drivers do not function correctly, and you may need to disable the connection test to suppress an error. Cockroach with the postgres JDBC driver is such an example. | No | Yes |
| connection_test_query | String | Connection test and init query string, required for some JDBC drivers that don't support isValid(). Typically you'd set this to "SELECT 1" | No | |
| username | String | JDBC username - this is optional as it may be included in the connection string, for many drivers | No | |
| password | String | JDBC password - this is optional as it may be included in the connection string, for many drivers | No | |
| statement | Array | An array of strings representing the SQL statement to run. Index 0 is the SQL statement that is prepared, all other array entries are passed in as parameters (in order). A parameter may either be a property of the event (i.e. "@timestamp", or "host") or a formatted string (i.e. "%{host} - %{message}" or "%{message}"). If a key is passed then it will be automatically converted as required for insertion into SQL. If it's a formatted string then it will be passed in verbatim. | Yes | |
| unsafe_statement | Boolean | If yes, the statement is evaluated for event fields - this allows you to use dynamic table names, etc. **This is highly dangerous** and you should **not** use this unless you are 100% sure that the field(s) you are passing in are 100% safe. Failure to do so will result in possible SQL injections. Please be aware that there is also a potential performance penalty as each event must be evaluated and inserted into SQL one at a time, whereas when this is false multiple events are inserted at once. Example statement: [ "insert into %{table_name_field} (column) values(?)", "fieldname" ] | No | False |
| max_pool_size | Number | Maximum number of connections to open to the SQL server at any 1 time. Default set to same as Logstash default number of workers | No | 24 |
| unsafe_statement | Boolean | If yes, the statement is evaluated for event fields - this allows you to use dynamic table names, etc. **This is highly dangerous** and you should **not** use this unless you are 100% sure that the field(s) you are passing in are 100% safe. Failure to do so will result in possible SQL injections. Example statement: [ "insert into %{table_name_field} (column) values(?)", "fieldname" ] | No | False |
| max_pool_size | Number | Maximum number of connections to open to the SQL server at any 1 time | No | 5 |
| connection_timeout | Number | Number of seconds before a SQL connection is closed | No | 2800 |
| flush_size | Number | Maximum number of entries to buffer before sending to SQL - if this is reached before idle_flush_time | No | 1000 |
| max_flush_exceptions | Number | Number of sequential flushes which cause an exception, before the set of events are discarded. Set to a value less than 1 if you never want it to stop. This should be carefully configured with respect to retry_initial_interval and retry_max_interval, if your SQL server is not highly available | No | 10 |
```
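As a reading aid for the statement and unsafe_statement rows above, here is a minimal plain-Ruby sketch of how such an array is split up; the names and values are purely illustrative, not the plugin's internals verbatim:

```
# Index 0 is the SQL that gets prepared; every later entry is bound as a
# parameter, in order. Event field names are converted to a suitable SQL type,
# while sprintf-style strings such as "%{host} - %{message}" are interpolated
# and passed in as strings.
statement = [
  'INSERT INTO log (host, message) VALUES(?, ?)',
  'host',                 # event field reference
  '%{host} - %{message}'  # formatted string
]

sql    = statement[0]     # prepared once
params = statement[1..-1] # bound positionally as parameters 1, 2, ...
```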
Vagrantfile (new file, 36 lines, vendored)

```
@@ -0,0 +1,36 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure(2) do |config|

  config.vm.define "debian" do |deb|
    deb.vm.box = 'debian/jessie64'
    deb.vm.synced_folder '.', '/vagrant', type: :virtualbox

    deb.vm.provision 'shell', inline: <<-EOP
      echo "deb http://ftp.debian.org/debian jessie-backports main" | tee --append /etc/apt/sources.list > /dev/null
      sed -i 's/main/main contrib non-free/g' /etc/apt/sources.list
      apt-get update
      apt-get remove openjdk-7-jre-headless -y -q
      apt-get install git openjdk-8-jre curl -y -q
      gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
      curl -sSL https://get.rvm.io | bash -s stable --ruby=jruby-1.7
      usermod -a -G rvm vagrant
    EOP
  end

  config.vm.define "centos" do |centos|
    centos.vm.box = 'centos/7'
    centos.ssh.insert_key = false # https://github.com/mitchellh/vagrant/issues/7610
    centos.vm.synced_folder '.', '/vagrant', type: :virtualbox

    centos.vm.provision 'shell', inline: <<-EOP
      yum update
      yum install java-1.7.0-openjdk
      gpg2 --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
      curl -sSL https://get.rvm.io | bash -s stable --ruby=jruby-1.7
      usermod -a -G rvm vagrant
    EOP
  end

end
```
````
@@ -1,6 +1,7 @@
# Example: Apache Phoenix (HBase SQL)
* Tested with Ubuntu 14.04.03 / Logstash 2.1 / Apache Phoenix 4.6
* <!> HBase and Zookeeper must both be accessible from the logstash machine <!>
* Please see apache-phoenix-thin-hbase-sql for phoenix-thin. The examples are different.
```
input
{
````
examples/apache-phoenix-thin-hbase-sql.md (new file, 28 lines)

````
@@ -0,0 +1,28 @@
# Example: Apache Phoenix-Thin (HBase SQL)

**There are special instructions for phoenix-thin. Please read carefully!**

* Tested with Logstash 5.1.1 / Apache Phoenix 4.9
* HBase and Zookeeper must both be accessible from the logstash machine
* At time of writing phoenix-client does not include all the required jars (see https://issues.apache.org/jira/browse/PHOENIX-3476), therefore you must *not* use the driver_jar_path configuration option and instead:
  - `mkdir -p vendor/jar/jdbc` in your logstash installation path
  - copy `phoenix-queryserver-client-4.9.0-HBase-1.2.jar` from the phoenix distribution into this folder
  - download the calcite jar from https://mvnrepository.com/artifact/org.apache.calcite/calcite-avatica/1.6.0 and place it into your `vendor/jar/jdbc` directory
* Use the following configuration as a base. The connection_test => false and connection_test_query settings are very important and should not be omitted; phoenix-thin does not appear to support isValid, and these are necessary for the connection to be added to the pool and become available.

```
input
{
  stdin { }
}
output {
  jdbc {
    connection_test => false
    connection_test_query => "select 1"
    driver_class => "org.apache.phoenix.queryserver.client.Driver"
    connection_string => "jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF"
    statement => [ "UPSERT INTO log (host, timestamp, message) VALUES(?, ?, ?)", "host", "@timestamp", "message" ]
  }

}
```
````
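The connection_test and connection_test_query options shown here correspond to the LogStash::Outputs::Jdbc changes later in this diff, where the query is handed to the HikariCP pool via setConnectionTestQuery and setConnectionInitSql.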
examples/cockroachdb.md (new file, 18 lines)

````
@@ -0,0 +1,18 @@
# Example: CockroachDB
- Tested using postgresql-9.4.1209.jre6.jar
- **Warning** cockroach is known to throw a warning on connection test (at time of writing), thus the connection test is explicitly disabled.

```
input
{
  stdin { }
}
output {
  jdbc {
    driver_jar_path => '/opt/postgresql-9.4.1209.jre6.jar'
    connection_test => false
    connection_string => 'jdbc:postgresql://127.0.0.1:26257/test?user=root'
    statement => [ "INSERT INTO log (host, timestamp, message) VALUES(?, CAST (? AS timestamp), ?)", "host", "@timestamp", "message" ]
  }
}
```
````
````
@@ -9,8 +9,9 @@ input
}
output {
  jdbc {
    driver_class => "com.mysql.jdbc.Driver"
    connection_string => "jdbc:mysql://HOSTNAME/DATABASE?user=USER&password=PASSWORD"
    statement => [ "INSERT INTO log (host, timestamp, message) VALUES(?, CAST (? AS timestamp), ?)", "host", "@timestamp", "message" ]
    statement => [ "INSERT INTO log (host, timestamp, message) VALUES(?, CAST(? AS timestamp), ?)", "host", "@timestamp", "message" ]
  }
}
```
````
````
@@ -1,5 +1,6 @@
# Example: SQL Server
* Tested using http://msdn.microsoft.com/en-gb/sqlserver/aa937724.aspx
* Known to be working with Microsoft SQL Server Always-On Cluster (see https://github.com/theangryangel/logstash-output-jdbc/issues/37). With thanks to [@phr0gz](https://github.com/phr0gz)
```
input
{
@@ -7,7 +8,7 @@ input
}
output {
  jdbc {
    connection_string => "jdbc:sqlserver://server:1433;databaseName=databasename;user=username;password=password;autoReconnect=true;"
    connection_string => "jdbc:sqlserver://server:1433;databaseName=databasename;user=username;password=password"
    statement => [ "INSERT INTO log (host, timestamp, message) VALUES(?, ?, ?)", "host", "@timestamp", "message" ]
  }
}
````
```
@@ -10,6 +10,7 @@ output {
  stdout { }

  jdbc {
    driver_class => "org.sqlite.JDBC"
    connection_string => 'jdbc:sqlite:test.db'
    statement => [ "INSERT INTO log (host, timestamp, message) VALUES(?, ?, ?)", "host", "@timestamp", "message" ]
  }
```
```
@@ -1,5 +1,5 @@
# encoding: utf-8
require 'logstash/environment'

root_dir = File.expand_path(File.join(File.dirname(__FILE__), ".."))
LogStash::Environment.load_runtime_jars! File.join(root_dir, "vendor")
root_dir = File.expand_path(File.join(File.dirname(__FILE__), '..'))
LogStash::Environment.load_runtime_jars! File.join(root_dir, 'vendor')
```
```
@@ -12,7 +12,7 @@ require 'logstash-output-jdbc_jars'
# includes correctly crafting the SQL statement, and matching the number of
# parameters correctly.
class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  declare_threadsafe! if self.respond_to?(:declare_threadsafe!)
  concurrency :shared

  STRFTIME_FMT = '%Y-%m-%d %T.%L'.freeze

@@ -83,6 +83,13 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  # Suitable for configuring retryable custom JDBC SQL state codes.
  config :retry_sql_states, validate: :array, default: []

  # Run a connection test on start.
  config :connection_test, validate: :boolean, default: true

  # Connection test and init string, required for some JDBC endpoints
  # notable phoenix-thin - see logstash-output-jdbc issue #60
  config :connection_test_query, validate: :string, required: false

  # Maximum number of sequential failed attempts, before we stop retrying.
  # If set to < 1, then it will infinitely retry.
  # At the default values this is a little over 10 minutes

@@ -95,7 +102,6 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  def register
    @logger.info('JDBC - Starting up')

    LogStash::Logger.setup_log4j(@logger)
    load_jar_files!

    @stopping = Concurrent::AtomicBoolean.new(false)

@@ -119,10 +125,6 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
    end
  end

  def receive(event)
    retrying_submit([event])
  end

  def close
    @stopping.make_true
    @pool.close

@@ -148,10 +150,17 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
    validate_connection_timeout = (@connection_timeout / 1000) / 2

    if !@connection_test_query.nil? and @connection_test_query.length > 1
      @pool.setConnectionTestQuery(@connection_test_query)
      @pool.setConnectionInitSql(@connection_test_query)
    end

    return unless @connection_test

    # Test connection
    test_connection = @pool.getConnection
    unless test_connection.isValid(validate_connection_timeout)
      @logger.error('JDBC - Connection is not valid. Please check connection string or that your JDBC endpoint is available.')
      @logger.warn('JDBC - Connection is not reporting as validate. Either connection is invalid, or driver is not getting the appropriate response.')
    end
    test_connection.close
  end
```
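For context, a standalone JRuby sketch of the two HikariCP calls used above, assuming the HikariCP jar is already on the classpath; the class names come from HikariCP's public API and the URL is only a placeholder:

```
require 'java'

config = Java::ComZaxxerHikari::HikariConfig.new
config.setJdbcUrl('jdbc:phoenix:thin:url=http://localhost:8765;serialization=PROTOBUF')

# Drivers without JDBC4 Connection.isValid() support need an explicit test
# query; the plugin also reuses it as the connection init SQL.
config.setConnectionTestQuery('select 1')
config.setConnectionInitSql('select 1')

pool = Java::ComZaxxerHikari::HikariDataSource.new(config)
connection = pool.getConnection
connection.close
```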
```
@@ -172,13 +181,13 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
      File.join(File.dirname(__FILE__), '../../../vendor/jar/jdbc/*.jar')
    end

    @logger.debug('JDBC - jarpath', path: jarpath)
    @logger.trace('JDBC - jarpath', path: jarpath)

    jars = Dir[jarpath]
    raise LogStash::ConfigurationError, 'JDBC - No jars found. Have you read the README?' if jars.empty?

    jars.each do |jar|
      @logger.debug('JDBC - Loaded jar', jar: jar)
      @logger.trace('JDBC - Loaded jar', jar: jar)
      require jar
    end
  end

@@ -253,7 +262,7 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  def add_statement_event_params(statement, event)
    @statement[1..-1].each_with_index do |i, idx|
      if i.is_a? String
        value = event[i]
        value = event.get(i)
        if value.nil? and i =~ /%\{/
          value = event.sprintf(i)
        end
```
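The event[i] to event.get(i) change reflects the Logstash 5 Event API, which replaced hash-style field access with get/set; the matching event.get('message') and e.set('message', nil) updates appear in the specs below. A minimal sketch, assuming a Logstash 5 runtime:

```
event = LogStash::Event.new('host' => 'example.local')

event.get('host')          # Logstash >= 5 field access
event.set('message', 'hi') # Logstash >= 5 field assignment
# event['host']            # pre-5 hash-style access used by the old code path
```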
```
@@ -275,7 +284,11 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
        # strftime appears to be the most reliable across drivers.
        statement.setString(idx + 1, value.time.strftime(STRFTIME_FMT))
      when Fixnum, Integer
        statement.setInt(idx + 1, value)
        if value > 2147483647 or value < -2147483648
          statement.setLong(idx + 1, value)
        else
          statement.setInt(idx + 1, value)
        end
      when Float
        statement.setFloat(idx + 1, value)
      when String
```
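The bounds 2147483647 and -2147483648 are the limits of a signed 32-bit Java int, so values outside that range are bound with setLong rather than setInt; this is also why the specs further down add a static_bigint column and use the sample value 4000881632477184.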
```
@@ -303,7 +316,7 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
    log_method = (retrying ? 'warn' : 'error')

    loop do
      @logger.send(log_method, log_text, :exception => current_exception, :backtrace => current_exception.backtrace)
      @logger.send(log_method, log_text, :exception => current_exception)

      if current_exception.respond_to? 'getNextException'
        current_exception = current_exception.getNextException()
```
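java.sql.SQLException can chain further exceptions via getNextException (batch inserts commonly report per-statement failures this way), so walking the chain here logs the underlying driver error rather than only the top-level exception.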
log4j2.xml (new file, 17 lines)

```
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration>
  <Appenders>
    <File name="file" fileName="log4j2.log">
      <PatternLayout pattern="%d{yyyy-mm-dd HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
    </File>
  </Appenders>

  <Loggers>
    <!-- If we need to figure out whats happening for development purposes, disable this -->
    <Logger name="com.zaxxer.hikari" level="off" />

    <Root level="debug">
      <AppenderRef ref="file"/>
    </Root>
  </Loggers>
</Configuration>
```
```
@@ -1,13 +1,13 @@
Gem::Specification.new do |s|
  s.name = 'logstash-output-jdbc'
  s.version = "0.3.0"
  s.licenses = [ "Apache License (2.0)" ]
  s.summary = "This plugin allows you to output to SQL, via JDBC"
  s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
  s.authors = ["the_angry_angel"]
  s.email = "karl+github@theangryangel.co.uk"
  s.homepage = "https://github.com/theangryangel/logstash-output-jdbc"
  s.require_paths = [ "lib" ]
  s.version = '5.1.0'
  s.licenses = ['Apache License (2.0)']
  s.summary = 'This plugin allows you to output to SQL, via JDBC'
  s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install 'logstash-output-jdbc'. This gem is not a stand-alone program"
  s.authors = ['the_angry_angel']
  s.email = 'karl+github@theangryangel.co.uk'
  s.homepage = 'https://github.com/theangryangel/logstash-output-jdbc'
  s.require_paths = ['lib']

  # Java only
  s.platform = 'java'

@@ -15,18 +15,18 @@ Gem::Specification.new do |s|
  # Files
  s.files = Dir.glob('{lib,spec}/**/*.rb') + Dir.glob('vendor/**/*') + %w(LICENSE.txt README.md)

  # Tests
  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "output" }
  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'output' }

  # Gem dependencies
  s.add_runtime_dependency 'logstash-core-plugin-api', '~> 1.0'
  s.add_runtime_dependency 'logstash-core-plugin-api', '>= 1.60', '<= 2.99'
  s.add_runtime_dependency 'stud'
  s.add_runtime_dependency 'logstash-codec-plain'

  s.requirements << "jar 'com.zaxxer:HikariCP', '2.4.2'"
  s.requirements << "jar 'com.zaxxer:HikariCP', '2.4.7'"
  s.requirements << "jar 'org.slf4j:slf4j-log4j12', '1.7.21'"

  s.add_development_dependency 'jar-dependencies'

@@ -34,5 +34,5 @@ Gem::Specification.new do |s|
  s.add_development_dependency 'logstash-devutils'

  s.add_development_dependency 'rubocop'
  s.add_development_dependency 'rubocop', '0.41.2'
end
```
scripts/minutes_to_retries.rb (new executable file, 19 lines)

```
@@ -0,0 +1,19 @@
#!/usr/bin/env ruby -w

seconds_to_reach = 10 * 60
retry_max_interval = 128

current_interval = 2
total_interval = 0
exceptions_count = 1

loop do
  break if total_interval > seconds_to_reach
  exceptions_count += 1

  current_interval = current_interval * 2 > retry_max_interval ? retry_max_interval : current_interval * 2

  total_interval += current_interval
end

puts exceptions_count
```
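With the values above (the wait doubling from 2 s and capped at retry_max_interval = 128 s), the loop crosses the ten-minute target after nine waits of 4 + 8 + 16 + 32 + 64 + 128 + 128 + 128 + 128 = 636 seconds, so the script prints 10, which matches the "a little over 10 minutes" comment on the retry settings and the default max_flush_exceptions of 10 in the README table.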
```
@@ -4,6 +4,32 @@ require 'stud/temporary'
require 'java'
require 'securerandom'

RSpec.configure do |c|

  def start_service(name)
    cmd = "sudo /etc/init.d/#{name}* start"

    `which systemctl`
    if $?.success?
      cmd = "sudo systemctl start #{name}"
    end

    `#{cmd}`
  end

  def stop_service(name)
    cmd = "sudo /etc/init.d/#{name}* stop"

    `which systemctl`
    if $?.success?
      cmd = "sudo systemctl stop #{name}"
    end

    `#{cmd}`
  end

end

RSpec.shared_context 'rspec setup' do
  it 'ensure jar is available' do
    expect(ENV[jdbc_jar_env]).not_to be_nil, "#{jdbc_jar_env} not defined, required to run tests"

@@ -20,7 +46,9 @@ RSpec.shared_context 'when initializing' do
end

RSpec.shared_context 'when outputting messages' do
  let(:logger) { double("logger") }
  let(:logger) {
    double("logger")
  }

  let(:jdbc_test_table) do
    'logstash_output_jdbc_test'

@@ -31,11 +59,11 @@ RSpec.shared_context 'when outputting messages' do
  end

  let(:jdbc_create_table) do
    "CREATE table #{jdbc_test_table} (created_at datetime not null, message varchar(512) not null, message_sprintf varchar(512) not null, static_int int not null, static_bit bit not null)"
    "CREATE table #{jdbc_test_table} (created_at datetime not null, message varchar(512) not null, message_sprintf varchar(512) not null, static_int int not null, static_bit bit not null, static_bigint bigint not null)"
  end

  let(:jdbc_statement) do
    ["insert into #{jdbc_test_table} (created_at, message, message_sprintf, static_int, static_bit) values(?, ?, ?, ?, ?)", '@timestamp', 'message', 'sprintf-%{message}', 1, true]
    ["insert into #{jdbc_test_table} (created_at, message, message_sprintf, static_int, static_bit, static_bigint) values(?, ?, ?, ?, ?, ?)", '@timestamp', 'message', 'sprintf-%{message}', 1, true, 4000881632477184]
  end

  let(:systemd_database_service) do

@@ -43,16 +71,27 @@ RSpec.shared_context 'when outputting messages' do
  end

  let(:event_fields) do
    { 'message' => "test-message #{SecureRandom.uuid}" }
    { message: "test-message #{SecureRandom.uuid}" }
  end

  let(:event) { LogStash::Event.new(event_fields) }

  let(:plugin) do
    # Setup logger
    allow(LogStash::Outputs::Jdbc).to receive(:logger).and_return(logger)

    # XXX: Suppress reflection logging. There has to be a better way around this.
    allow(logger).to receive(:debug).with(/config LogStash::/)

    # Suppress beta warnings.
    allow(logger).to receive(:info).with(/Please let us know if you find bugs or have suggestions on how to improve this plugin./)

    # Suppress start up messages.
    expect(logger).to receive(:info).once.with(/JDBC - Starting up/)

    # Setup plugin
    output = LogStash::Plugin.lookup('output', 'jdbc').new(jdbc_settings)
    output.register
    output.logger = logger

    # Setup table
    c = output.instance_variable_get(:@pool).getConnection

@@ -82,7 +121,7 @@ RSpec.shared_context 'when outputting messages' do
    # Verify the number of items in the output table
    c = plugin.instance_variable_get(:@pool).getConnection
    stmt = c.prepareStatement("select count(*) as total from #{jdbc_test_table} where message = ?")
    stmt.setString(1, event['message'])
    stmt.setString(1, event.get('message'))
    rs = stmt.executeQuery
    count = 0
    count = rs.getInt('total') while rs.next

@@ -93,10 +132,14 @@ RSpec.shared_context 'when outputting messages' do
  end

  it 'should not save event, and log an unretryable exception' do
    e = LogStash::Event.new({})
    e = event
    original_event = e.get('message')
    e.set('message', nil)

    expect(logger).to receive(:error).once.with(/JDBC - Exception. Not retrying/, Hash)
    expect { plugin.multi_receive([e]) }.to_not raise_error
    expect { plugin.multi_receive([event]) }.to_not raise_error

    e.set('message', original_event)
  end

  it 'it should retry after a connection loss, and log a warning' do

@@ -107,29 +150,21 @@ RSpec.shared_context 'when outputting messages' do
    # Check that everything is fine right now
    expect { p.multi_receive([event]) }.not_to raise_error

    # Start a thread to stop and restart the service.
    stop_service(systemd_database_service)

    # Start a thread to restart the service after the fact.
    t = Thread.new(systemd_database_service) { |systemd_database_service|
      start_stop_cmd = 'sudo /etc/init.d/%<service>s* %<action>s'
      sleep 20

      `which systemctl`
      if $?.success?
        start_stop_cmd = 'sudo systemctl %<action>s %<service>s'
      end

      cmd = start_stop_cmd % { action: 'stop', service: systemd_database_service }
      `#{cmd}`
      sleep 10

      cmd = start_stop_cmd % { action: 'start', service: systemd_database_service }
      `#{cmd}`
      start_service(systemd_database_service)
    }

    # Wait a few seconds to the service to stop
    sleep 5
    t.run

    expect(logger).to receive(:warn).at_least(:once).with(/JDBC - Exception. Retrying/, Hash)
    expect { p.multi_receive([event]) }.to_not raise_error

    # Wait for the thread to finish
    t.join
  end
end

@@ -10,7 +10,7 @@ describe 'logstash-output-jdbc: derby', if: ENV['JDBC_DERBY_JAR'] do
  end

  let(:jdbc_create_table) do
    "CREATE table #{jdbc_test_table} (created_at timestamp not null, message varchar(512) not null, message_sprintf varchar(512) not null, static_int int not null, static_bit boolean not null)"
    "CREATE table #{jdbc_test_table} (created_at timestamp not null, message varchar(512) not null, message_sprintf varchar(512) not null, static_int int not null, static_bit boolean not null, static_bigint bigint not null)"
  end

  let(:jdbc_settings) do
```