ELK
Basic System Prep
Update the host with all the latest patches
# Apply all pending OS updates before installing the Elastic stack.
yum -y update
Throw on a couple of apps to make the box easier to work with
# Admin convenience tooling: networking utilities, time sync, editor, transfer tools.
yum -y install net-tools ntp vim net-snmp-utils curl screen bind-utils rsync wget
Open the firewall ports with firewall-cmd; be sure to change the forwarding address and port to match your host.
# Open the ELK service ports: Elasticsearch HTTP/transport (9200/9300),
# Kibana (5601), Beats (5044), syslog (514) and its forwarded target (5014).
for port in 9200/tcp 9300/tcp 5601/tcp 5044/tcp 514/tcp 514/udp 5014/tcp 5014/udp; do
  firewall-cmd --add-port="${port}" --permanent
done
# Redirect the privileged syslog port 514 to 5014 so logstash does not need to bind a <1024 port.
firewall-cmd --add-forward-port=port=514:proto=udp:toport=5014:toaddr=192.168.3.110 --permanent
firewall-cmd --add-forward-port=port=514:proto=tcp:toport=5014:toaddr=192.168.3.110 --permanent
# Activate the permanent rules in the running firewall.
firewall-cmd --reload
Install Java8
Install OpenJDK8 JRE
# Install the OpenJDK 8 JRE that the Elastic stack components run on.
yum -y install java-1.8.0-openjdk;
Install and Configure Elasticsearch
Download the required RPMs
Download and install the necessary packages
# Fetch and install the Elasticsearch and Kibana 7.3.2 RPMs.
for app in elasticsearch kibana; do
  rpm_file="${app}-7.3.2-x86_64.rpm"
  wget "https://artifacts.elastic.co/downloads/${app}/${rpm_file}"
  rpm -ivh "${rpm_file}"
done
Install Elasticsearch
Then modify the config file /etc/elasticsearch/elasticsearch.yml so that it will accept connections on the network, and edit the node name and the other node addresses.
# Move the data path to /home/elasticsearch and give the service user ownership.
mkdir -p /home/elasticsearch/;   # -p: safe to re-run if the directory already exists
chown -R elasticsearch /home/elasticsearch/;
# Using '|' as the sed delimiter avoids escaping every '/' in the paths.
sed -i -e 's|path.data: /var/lib/elasticsearch|path.data: /home/elasticsearch|' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#cluster.name: my-application/cluster.name: production/' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#discovery.seed_hosts: \["host1", "host2"\]/discovery.seed_hosts: \["elk1.test.com", "elk2.test.com", "elk3.test.com"\]/' /etc/elasticsearch/elasticsearch.yml;
# NOTE: ${HOSTNAME} is inside single quotes on purpose -- the literal string is
# written to the file and Elasticsearch resolves it from the environment at startup.
sed -i -e 's/#network.host: 192.168.0.1/network.host: ${HOSTNAME}/' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#node.name: node-1/node.name: ${HOSTNAME}/' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#cluster.initial_master_nodes: \["node-1", "node-2"\]/cluster.initial_master_nodes: \["elk1.test.com", "elk2.test.com", "elk3.test.com"\]/' /etc/elasticsearch/elasticsearch.yml;
# Disable the X-Pack machine-learning feature on these nodes.
echo "xpack.ml.enabled: false" >> /etc/elasticsearch/elasticsearch.yml;
Edit the java settings to change the max and min heap size to 2GB.
# Raise both the minimum (Xms) and maximum (Xmx) JVM heap from 1 GB to 2 GB in one pass.
sed -i -e 's/Xms1g/Xms2g/' -e 's/Xmx1g/Xmx2g/' /etc/elasticsearch/jvm.options;
Increase the max open-file descriptors and the max mmap count, and reduce swapping to a minimum (vm.swappiness=1).
# Raise per-user open-file limits. (The stray ';' the original appended to the
# 'soft nofile' entry made that line invalid in /etc/security/limits.conf.)
echo -e "* soft nofile 16384\n* hard nofile 65536\n" >> /etc/security/limits.conf;
echo "net.core.somaxconn=16384" >> /etc/sysctl.conf;
# Elasticsearch 7 fails its bootstrap checks when bound to a non-loopback
# address unless vm.max_map_count is at least 262144.
echo "vm.max_map_count=262144" >> /etc/sysctl.conf;
# Keep swapping to a bare minimum; Elasticsearch performs poorly when swapped out.
echo "vm.swappiness=1" >> /etc/sysctl.conf;
# Apply the runtime values immediately without waiting for a reboot.
echo "16384" > /proc/sys/net/core/somaxconn;
echo "262144" > /proc/sys/vm/max_map_count;
Start and enable elasticsearch
# Start Elasticsearch now and enable it at boot.
systemctl start elasticsearch;
systemctl enable elasticsearch;
To verify if elasticsearch is running, wait ~2 min then run
# Query the Elasticsearch HTTP API (-X GET is curl's default method, kept for clarity).
curl -X GET ${HOSTNAME}:9200
Install and Configure Kibana
Edit the kibana.yml config file
# Double-quote the sed programs so ${HOSTNAME} is expanded by the shell at edit
# time, exactly as the original quote-splicing did; '|' as delimiter avoids
# escaping the '/' characters in the URL.
sed -i -e "s/#server.host: \"localhost\"/server.host: ${HOSTNAME}/" /etc/kibana/kibana.yml;
sed -i -e "s|#elasticsearch.hosts: \[\"http://localhost:9200\"\]|elasticsearch.hosts: [\"http://${HOSTNAME}:9200\"]|g" /etc/kibana/kibana.yml;
Start Kibana
# Start Kibana now and enable it at boot.
systemctl start kibana;
systemctl enable kibana
Install and Configure Logstash
Since logstash creates a “logstash” user, we can run the process as root in order to open the standard syslog ports and then the process is run as logstash as seen in the screen capture.
Download and install the logstash rpm
# Download and install the Logstash 7.3.2 package.
logstash_rpm="logstash-7.3.2.rpm"
wget "https://artifacts.elastic.co/downloads/logstash/${logstash_rpm}"
rpm -ivh "${logstash_rpm}"
Create another input file so logstash will listen to syslog inputs over tcp and udp.
# Listen for syslog on 5014/tcp and 5014/udp (firewalld forwards 514 here).
# Fix: the original tcp host was "1192.168.3.110" (leading '1' typo), which
# would make logstash fail to bind the listener.
# The here-doc delimiter is quoted so nothing in the config can be shell-expanded.
cat >/etc/logstash/conf.d/03-syslog-input.conf <<'EOL'
input {
  tcp {
    host => "192.168.3.110"
    port => 5014
    type => syslog
  }
}
input {
  udp {
    host => "192.168.3.110"
    port => 5014
    type => syslog
  }
}
EOL
Create a filter for syslog files
# Grok filter for filebeat "system" module events (the "auth" and "syslog"
# filesets): parses timestamps, adds geoip for ssh source IPs, and drops the
# raw "message" field once parsed.
# NOTE(review): the EOL delimiter is unquoted, so the shell would expand any
# $VAR / $(cmd) in the body. The patterns below only use %{...}, so the file is
# written through unchanged, but quoting the delimiter ('EOL') would be safer.
# NOTE(review): the groupadd pattern uses dotted field names
# (system.auth.groupadd.name) while every other pattern uses [bracket][paths];
# this looks unintentional, but changing it would rename indexed fields --
# confirm with existing index data before touching it.
cat >/etc/logstash/conf.d/10-syslog-filter.conf <<EOL
filter {
if [fileset][module] == "system" {
if [fileset][name] == "auth" {
grok {
match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?",
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}",
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}",
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}",
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}",
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$",
"%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] }
pattern_definitions => {
"GREEDYMULTILINE"=> "(.|\n)*"
}
remove_field => "message"
}
date {
match => [ "[system][auth][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
geoip {
source => "[system][auth][ssh][ip]"
target => "[system][auth][ssh][geoip]"
}
}
else if [fileset][name] == "syslog" {
grok {
match => { "message" => ["%{SYSLOGTIMESTAMP:[system][syslog][timestamp]} %{SYSLOGHOST:[system][syslog][hostname]} %{DATA:[system][syslog][program]}(?:\[%{POSINT:[system][syslog][pid]}\])?: %{GREEDYMULTILINE:[system][syslog][message]}"] }
pattern_definitions => { "GREEDYMULTILINE" => "(.|\n)*" }
remove_field => "message"
}
date {
match => [ "[system][syslog][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}
}
}
EOL
Add the output stage of logstash, to send the filtered data to elasticsearch
# Route parsed syslog events to a daily "remote-syslog-*" index; everything
# else (beats traffic) goes to per-beat indices named from beat metadata.
# NOTE: the EOL delimiter is deliberately unquoted -- ${HOSTNAME} is expanded
# by the shell NOW, so the generated file contains this host's actual name.
# NOTE(review): the first elasticsearch block passes hosts as a bare string and
# the second as an array; both forms work, but one style should be picked.
cat >/etc/logstash/conf.d/30-elasticsearch-output.conf <<EOL
output {
if [type] == "syslog" {
elasticsearch {
hosts => "${HOSTNAME}:9200"
index => "remote-syslog-%{+YYYY.MM.dd}"
}
}
else {
elasticsearch {
hosts => ["${HOSTNAME}:9200"]
manage_template => false
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
}
}
}
EOL
Test the logstash configuration using the following
# Dry-run the pipeline configuration (-t) as the logstash user before starting the service.
sudo -u logstash /usr/share/logstash/bin/logstash --path.settings /etc/logstash -t
Start logstash
# Start Logstash now and enable it at boot.
systemctl start logstash;
systemctl enable logstash;
Install and Configure Filebeat (As root)
Install filebeat
# Download and install the Filebeat 7.3.2 package.
filebeat_rpm="filebeat-7.3.2-x86_64.rpm"
wget "https://artifacts.elastic.co/downloads/beats/filebeat/${filebeat_rpm}"
rpm -ivh "${filebeat_rpm}"
In the filebeat.yml file, edit the log source paths and also set log enabled to “true”. Edit the “output.elasticsearch” field to be the hostname of the elk host.
vim /etc/filebeat/filebeat.yml
# --- filebeat.yml excerpt: make the edits below by hand ---
- type: log
# Change to true to enable this input configuration.
enabled: true
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/messages
- /var/log/secure
output.elasticsearch:
# Array of hosts to connect to.
# Replace <ELK_HOST> with this ELK server's hostname or IP before saving.
hosts: ["<ELK_HOST>:9200"]
Enable the system module
# Enable filebeat's bundled "system" module (matched by the logstash filter above).
filebeat modules enable system
Load the index template to Elasticsearch
# NOTE(review): the whole -E value is single-quoted, so ${HOSTNAME} is passed
# literally to filebeat -- presumably filebeat resolves it from the
# environment; verify, or double-quote so the shell expands it instead.
filebeat setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["${HOSTNAME}:9200"]';
Load the dashboard to elasticsearch
# Load the sample dashboards into Kibana; -e logs setup progress to stderr.
# NOTE(review): '${HOSTNAME}:9200' is single-quoted (not expanded by the shell)
# while setup.kibana.host=${HOSTNAME} is -- confirm both resolve as intended.
filebeat setup -e -E output.logstash.enabled=false -E output.elasticsearch.hosts=['${HOSTNAME}:9200'] -E setup.kibana.host=${HOSTNAME}:5601;
Start and enable filebeat on reboot
# Start Filebeat now and enable it at boot.
systemctl start filebeat;
systemctl enable filebeat;
Install and Configure MetricBeat (As root)
Install MetricBeat
# Download and install the Metricbeat 7.3.2 package.
metricbeat_rpm="metricbeat-7.3.2-x86_64.rpm"
wget "https://artifacts.elastic.co/downloads/beats/metricbeat/${metricbeat_rpm}"
rpm -ivh "${metricbeat_rpm}"
Setup the template in elasticsearch
# NOTE(review): 'sudo' is redundant here -- this section already runs as root
# (see the section heading); harmless, but inconsistent with the other steps.
sudo metricbeat setup --template -E 'output.elasticsearch.hosts=["${HOSTNAME}:9200"]';
Setup the dashboard in elasticsearch
# Load the Metricbeat dashboards into Kibana; -e logs setup progress to stderr.
metricbeat setup -e -E output.elasticsearch.hosts=['${HOSTNAME}:9200'] -E setup.kibana.host=${HOSTNAME}:5601 ;
Open the metricbeat.yml file and edit the output to go to elasticsearch host
vim /etc/metricbeat/metricbeat.yml
# --- metricbeat.yml excerpt: make the edits below by hand ---
output.elasticsearch:
# Array of hosts to connect to.
# Replace <HOSTNAME> with this ELK host's name before saving.
hosts: ["<HOSTNAME>:9200"]
Start metricbeat
# Start Metricbeat now and enable it at boot.
systemctl start metricbeat;
systemctl enable metricbeat;
Instructions for installing arbiter (mon02.mb2)
Open the firewall-cmd ports
# Only the Elasticsearch HTTP (9200) and transport (9300) ports are needed on the arbiter.
for port in 9200/tcp 9300/tcp; do
  firewall-cmd --add-port="${port}" --permanent
done
firewall-cmd --reload
Download and install the necessary packages
# Download and install the Elasticsearch 7.3.2 package.
es_rpm="elasticsearch-7.3.2-x86_64.rpm"
wget "https://artifacts.elastic.co/downloads/elasticsearch/${es_rpm}"
rpm -ivh "${es_rpm}"
Install Elasticsearch
Then modify the config file /etc/elasticsearch/elasticsearch.yml so that it will accept connections on the network, and edit the node name and the other node addresses.
# Move the data path to /home/elasticsearch and give the service user ownership.
mkdir -p /home/elasticsearch/;   # -p: safe to re-run if the directory already exists
chown -R elasticsearch /home/elasticsearch/;
# Using '|' as the sed delimiter avoids escaping every '/' in the paths.
sed -i -e 's|path.data: /var/lib/elasticsearch|path.data: /home/elasticsearch|' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#cluster.name: my-application/cluster.name: production/' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#discovery.seed_hosts: \["host1", "host2"\]/discovery.seed_hosts: \["elk1.test.com", "elk2.test.com", "elk3.test.com"\]/' /etc/elasticsearch/elasticsearch.yml;
# NOTE: ${HOSTNAME} is inside single quotes on purpose -- the literal string is
# written to the file and Elasticsearch resolves it from the environment at startup.
sed -i -e 's/#network.host: 192.168.0.1/network.host: ${HOSTNAME}/' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#node.name: node-1/node.name: ${HOSTNAME}/' /etc/elasticsearch/elasticsearch.yml;
sed -i -e 's/#cluster.initial_master_nodes: \["node-1", "node-2"\]/cluster.initial_master_nodes: \["elk1.test.com", "elk2.test.com", "elk3.test.com"\]/' /etc/elasticsearch/elasticsearch.yml;
# Disable the X-Pack machine-learning feature on this node.
echo "xpack.ml.enabled: false" >> /etc/elasticsearch/elasticsearch.yml;
# The arbiter holds no data; it only participates in cluster voting.
echo "node.data: false" >> /etc/elasticsearch/elasticsearch.yml;
Edit the java settings to change the max and min heap size to 2GB.
# Raise both the minimum (Xms) and maximum (Xmx) JVM heap from 1 GB to 2 GB in one pass.
sed -i -e 's/Xms1g/Xms2g/' -e 's/Xmx1g/Xmx2g/' /etc/elasticsearch/jvm.options;
Increase the max open-file descriptors and the max mmap count, and reduce swapping to a minimum (vm.swappiness=1).
# Raise per-user open-file limits. (The stray ';' the original appended to the
# 'soft nofile' entry made that line invalid in /etc/security/limits.conf.)
echo -e "* soft nofile 16384\n* hard nofile 65536\n" >> /etc/security/limits.conf;
echo "net.core.somaxconn=16384" >> /etc/sysctl.conf;
# Elasticsearch 7 fails its bootstrap checks when bound to a non-loopback
# address unless vm.max_map_count is at least 262144.
echo "vm.max_map_count=262144" >> /etc/sysctl.conf;
# Keep swapping to a bare minimum; Elasticsearch performs poorly when swapped out.
echo "vm.swappiness=1" >> /etc/sysctl.conf;
# Apply the runtime values immediately without waiting for a reboot.
echo "16384" > /proc/sys/net/core/somaxconn;
echo "262144" > /proc/sys/vm/max_map_count;
Start and enable elasticsearch
# Start Elasticsearch now and enable it at boot.
systemctl start elasticsearch;
systemctl enable elasticsearch;
To verify if elasticsearch is running in the cluster, wait ~2 min then run
# Cluster health shows the node count and overall status (green/yellow/red).
curl -X GET ${HOSTNAME}:9200/_cluster/health?pretty