Author Archives: admin

Postfix

SSL Self certificate

# openssl genrsa -des3 -out server.key 2048
Generating RSA private key, 2048 bit long modulus
...+++
......................................................................................................................................................+++
e is 65537 (0x10001)
Enter pass phrase for server.key:
140645244917648:error:28069065:lib(40):UI_set_result:result too small:ui_lib.c:831:You must type in 4 to 1023 characters
Enter pass phrase for server.key: ***********
Verifying - Enter pass phrase for server.key: ***********
# openssl rsa -in server.key -out server.key.insecure
Enter pass phrase for server.key: ***********
writing RSA key
# mv server.key server.key.secure
# mv server.key.insecure server.key

# openssl req -new -key server.key -out server.csr
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:NL
State or Province Name (full name) []:
Locality Name (eg, city) [Default City]:Rotterdam
Organization Name (eg, company) [Default Company Ltd]: *********
Organizational Unit Name (eg, section) []:
Common Name (eg, your name or your server's hostname) []: ***********
Email Address []:

Please enter the following 'extra' attributes
to be sent with your certificate request
A challenge password []:
An optional company name []:
# openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
Signature ok
subject=/C=NL/L=Rotterdam/O=Welgg/CN=one.welgg.com
Getting Private key
#

Elasticsearch

Installation from RPM

[root@NLRTM1-S0503 ~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
[root@NLRTM1-S0503 ~]# vi /etc/yum.repos.d/elastic.repo

[logstash-6.x]
name=Elastic repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

[elasticsearch-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

[kibana-6.x]
name=Kibana repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

[root@NLRTM1-S0503 ~]# yum install elasticsearch
[root@NLRTM1-S0503 ~]# yum install kibana
[root@NLRTM1-S0503 ~]# yum install logstash

User preparation

Logstash

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

[root@NLRTM1-S0503 logstash]# ./bin/logstash -t
WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console
[INFO ] 2018-12-06 00:08:16.015 [main] writabledirectory - Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
[INFO ] 2018-12-06 00:08:16.024 [main] writabledirectory - Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
ERROR: Failed to read pipelines yaml file. Location: /usr/share/logstash/config/pipelines.yml

remedy:

[support@NLRTM1-S0503 logstash]$ ./bin/logstash --path.settings /etc/logstash -t

Start logstash as a service

Check logstash service user and correct permissions

[root@NLRTM1-S0503 logstash]# vi /etc/systemd/system/logstash.service 
[root@NLRTM1-S0503 etc]# chown -R logstash:logstash /etc/logstash
[root@NLRTM1-S0503 etc]# chown -R logstash:logstash /usr/share/logstash

# chmod -R g+rwx /usr/share/logstash/
# chown -R logstash:logstash /var/log/logstash
[root@NLRTM1-S0503 logstash]# /bin/systemctl daemon-reload
[root@NLRTM1-S0503 logstash]# systemctl enable logstash.service
Created symlink from /etc/systemd/system/multi-user.target.wants/logstash.service to /etc/systemd/system/logstash.service.
[root@NLRTM1-S0503 logstash]# systemctl start logstash.service

Setting up ELK components as service

[root@cilacap etc]# chown -R logstash:logstash logstash
[root@cilacap etc]# chown -R elasticsearch:elasticsearch elasticsearch
[root@cilacap etc]# chown -R kibana:kibana kibana

[root@cilacap etc]# usermod -aG logstash elastic
[root@cilacap etc]# usermod -aG elasticsearch elastic
[root@cilacap etc]# usermod -aG kibana elastic
[root@cilacap etc]# groups elastic
elastic : elastic wheel logstash elasticsearch kibana

[root@cilacap etc]# sudo /bin/systemctl daemon-reload
[root@cilacap etc]# sudo /bin/systemctl enable elasticsearch.service
Created symlink from /etc/systemd/system/multi-user.target.wants/elasticsearch.service to /usr/lib/systemd/system/elasticsearch.service.
[root@cilacap etc]# systemctl enable logstash.service
Created symlink from /etc/systemd/system/multi-user.target.wants/logstash.service to /etc/systemd/system/logstash.service.
[root@cilacap etc]# systemctl enable kibana.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kibana.service to /etc/systemd/system/kibana.service.
[root@cilacap etc]#


# cp /etc/systemd/system/logstash-ecn4.service /etc/systemd/system/logstash-apex.service
# vi /etc/systemd/system/logstash-apex.service
# cd /etc/logstash/
# cp -R ecn4 apex
# cd apex
# vi logstash.yml 
[root@NLRTM1-S0503 logstash]# vi /etc/logstash/apex/pipelines.yml
[root@NLRTM1-S0503 logstash]# chown -R logstash:logstash apex
[root@NLRTM1-S0503 logstash]# systemctl enable logstash-apex.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/logstash-apex.service to /etc/systemd/system/logstash-apex.service. 

Filebeat module APACHE2

[root@one filebeat]# ./filebeat modules enable apache2

[root@one filebeat]# nohup ./filebeat >/dev/null 2>&1 &

[root@one filebeat]# tail logs/filebeat
2018-12-08T21:06:44.246+0100	ERROR	pipeline/output.go:100	Failed to connect to backoff(elasticsearch(http://www.atikin.nl:49200)): Connection marked as failed because the onConnect callback failed: Error loading pipeline for fileset apache2/access: This module requires the following Elasticsearch plugins: ingest-user-agent, ingest-geoip. You can install them by running the following commands on all the Elasticsearch nodes:
    sudo bin/elasticsearch-plugin install ingest-user-agent
    sudo bin/elasticsearch-plugin install ingest-geoip

Install Elasticsearch plugins

[root@cilacap ~]# cd /usr/share/elasticsearch/
[root@cilacap elasticsearch]# ./bin/elasticsearch-plugin install ingest-user-agent
-> Downloading ingest-user-agent from elastic
[=================================================] 100%   
-> Installed ingest-user-agent
[root@cilacap elasticsearch]# ./bin/elasticsearch-plugin install ingest-geoip
-> Downloading ingest-geoip from elastic
[=================================================] 100%   
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@     WARNING: plugin requires additional permissions     @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
* java.lang.RuntimePermission accessDeclaredMembers
* java.lang.reflect.ReflectPermission suppressAccessChecks
See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html
for descriptions of what these permissions allow and the associated risks.
Continue with installation? [y/N]y
-> Installed ingest-geoip

[root@cilacap elasticsearch]# systemctl restart elasticsearch

Apache module configuration

[root@one filebeat]# vi /opt/elastic/filebeat/modules.d/apache2.yml
- module: apache2
  # Access logs
  access:
    enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    var.paths: ["/var/log/httpd/paulowna_site.com-access_log*","/var/log/httpd/paulowna_shop.com-access_log*" ]

  # Error logs
  error:
    enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    var.paths: ["/var/log/httpd/paulowna_site.com_error_log", "/var/log/httpd/paulowna_shop.com-error_log"]

Run filebeat

[root@one filebeat]# /usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat -d "publish" 

Run filebeat in background

nohup /usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat  -d "publish" 2>&1 >/dev/null &

Journalctl

[root@NLRTM1-S0503 es00]# journalctl -u elasticsearch.service

SQLServer JDBC Driver and Maven

com.microsoft.sqlserver
sqljdbc41
4.1
runtime

The issue is that Maven can’t find this artifact in any of the configured maven repositories.

Unfortunately Microsoft don’t make this available via any maven repository. You need to download the jar from the Microsoft website, and then manually install it into your local maven repository.

You can do this with the following maven command:

mvn install:install-file -Dfile=sqljdbc41.jar -DgroupId=com.microsoft.sqlserver -DartifactId=sqljdbc41 -Dversion=4.1 -Dpackaging=jar

Then next time you run maven on your POM it will find the artifact.

How To Install Apache Maven on CentOS/RHEL 7/6/5

Step 1 – Install Apache Maven

After verifying java version on you system. Download Apache maven from its official website or use following command to download Apache Maven 3.3.9.

$ cd /opt
$ wget http://www-eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz

Now extract downloaded archive using following command.

$ sudo tar xzf apache-maven-3.3.9-bin.tar.gz
$ sudo ln -s apache-maven-3.3.9 maven

Step 2 – Setup Environment Variables

As you have downloaded pre compiled Apache Maven files on your system. Now set the environments variables by creating new file /etc/profile.d/maven.sh.

$ sudo vi /etc/profile.d/maven.sh

and add following content.

export M2_HOME=/opt/maven
export PATH=${M2_HOME}/bin:${PATH}

Now load the environment variables in current shell using following command.

$ source /etc/profile.d/maven.sh

Step 3 – Check Version

Apache Maven has been successfully configured on your system. Use following command to check version of Maven.

$ mvn -version 

Webalizer on Red Hat and CentOS

https://blog.100tb.com/analyze-your-website-statistics-with-webalizer-on-red-hat-and-centos

Analyze Your Website Statistics with Webalizer on Red Hat and CentOS

When running a webserver your log files can rapidly fill with information about the visitors to your site, Webalizer can help.

Your webserver’s log files can be a mine of useful information with regards to the users visiting your website. Unfortunately, reading this information from the logs isn’t the simplest of tasks. To make this resource more useful there are tools available that look through the log files and generate statistics from them. Webalizer is one of these tools: it runs at regular intervals and creates statistics from your website logs as well as charts of usage. It is free and open source, being licensed under the GNU GPL.

How do I install Webalizer?

***For information on installing Webalizer on Debian & Ubuntu, read yesterday's post on the 100TB blog

Red Hat & CentOS

Installing Webalizer in Red Hat and CentOS is pretty straightforward as it is in the base repositories. So the install is as simple as the following command:

yum install webalizer

If you are using Apache, in its default configuration then your task of installing Webalizer is complete. Webalizer comes pre-configured to use Apache’s default log file for its data source, and then output its information to /var/www/usage with Apache configured to serve that directory as a subdirectory of the main website under /usage. To test this, simply run the following command:

webalizer

If all has worked correctly, Webalizer should have placed the various files that it creates in the /var/www/usage directory. If so, then you are done and the default cron task that is created through the installation should see you fine with keeping the statistics up to date.

 

100TB offers around-the-clock technical support as a resource to help whenever you need answers.

 

Apache with Virtualhost

If, on the other hand, you are using Apache with Virtualhosts then you have some work ahead of you, the first thing needing to be done is to create configuration files for each of your Virtualhosts. For this I’d suggest making a directory for these files then making copies of the webalizer.conf file in there for each Virtualhost domain you are running:

mkdir /etc/webalizer

cp /etc/webalizer.conf /etc/webalizer/webalizer.yourdomain.com.conf

The above commands create the webalizer config directory and then adds a config file. Note that you need to change yourdomain.com for the domain that you are using webalizer on. The next thing you need to do is edit the new configuration file to fit your configuration. For the following example we will be using a server configured to store log files in /var/log/httpd/yourdomain.com_access.log and the website files in the /var/www/yourdomain.com directory. The configuration file will need editing – I’m going to use nano in this example, but other text editors are available.

nano /etc/webalizer/webalizer.yourdomain.com.conf

The main lines to change are the LogFile line and the OutputDir line, so find those and edit them to match your configuration.

LogFile /var/log/httpd/yourdomain.com_access.log

OutputDir /var/www/yourdomain.com/webalizer

You can now save and exit this file. To avoid having to create a lot of extra configuration files for Apache, I’m using a subdirectory within the website directory for the Webalizer output. This means that it would be accessible from the web as below:

http://yourdomain.com/webalizer

The next step is to populate the directory for which we’ll need to run Webalizer:

webalizer -c /etc/webalizer/webalizer.yourdomain.com.conf

The -c flag tells Webalizer to use the specified configuration file rather than its default, so it should process the new configuration file and create the correct output. If this has worked properly then you should see the files in the directory you uses for the OutputDir.

 

Finalizing Webalizer

The last step is to create the cron task required to generate the webalizer output. This is where putting the configuration all within one directory will come in handy as we can create a simple BASH script to process the configuration files. Edit the Webalizer cron task created when Webalizer was installed and then use it to continue:

nano /etc/cron.daily/00webalizer

Remove all the content of this and then paste in the following code:

#!/bin/bash

# Update website statistics for Virtualhosts using the /etc/webalizer directory.
# Runs webalizer quietly (-Q) once for every per-domain configuration file.

for conf in /etc/webalizer/*.conf; do

  # If no .conf files exist, the glob stays literal (nullglob is not set) —
  # the -f test skips it. Quoting protects paths with spaces or glob chars.
  [ -f "$conf" ] || continue

  /usr/bin/webalizer -c "$conf" -Q

done

VMWare Tools: Searching for a valid kernel header path…

VMware tools throws:
Searching for a valid kernel header path…
The path “” is not a valid path to the 3.10.0-327.el7.x86_64 kernel headers.
Would you like to change it? [yes]

To prevent VMware Tools from failing to find the kernel headers, forget about the version delivered with Workstation 8 or Fusion 8, which is something like VMwareTools-8.8.6-1035889.tar.gz
Instead download latest VMWare Tools from VMware i.e.: VMwareTools-10.0.5-3228253.tar.gz and

# rpm -qa | grep open-vm-tools
# rpm -e open-vm-tools-desktop-
# rpm -e open-vm-tools-
# yum install gcc
# yum -y install kernel-devel-`uname -r`
# yum -y install kernel-headers-`uname -r`

VMware Tools can be downloaded from the Product Download page.

Stuff comes in a tar.gz containing an .iso image for VMWare tools.

 

  • VMware Tools support for guest operating systems in 10.1.7
    • windows.iso supports Windows Vista and later.
    • linux.iso supports Linux guest operating systems Red Hat Enterprise Linux (RHEL) 5 and later, SUSE Linux Enterprise Server (SLES) 11 and later, Ubuntu 10.04 and later. It also supports other distributions with glibc versions 2.5 and later.
    • darwin.iso supports Mac OS X versions 10.11 and later.
    • freebsd.iso supports FreeBSD versions.
    • solaris.iso supports Solaris versions.

 

# gunzip VMwareTools*.tar.gz
# tar xvf VMwareTools*.tar
# cd vmware-tools-distrib/
# perl vmware-install.pl 

 

Works for:
OEL 7.2 + Fusion 8
Centos 7 + Workstation 8

Cisco AnyConnect VPN Client for Linux

Cisco AnyConnect client supports Red Hat Enterprise Linux 6.x (32-bit) and 6.4 (64-bit), Ubuntu 9.x, 10.x, and 11.x (32-bit) and Ubuntu 12.04 & 12.10 (64-bit). It is a standalone tarball package for Linux platforms.

Installing CiscoAnyConnect VPN Client for Linux

Download Full installation package – Linux 64-bit (tar.gz) anyconnect-predeploy-linux-64-4.3.02039-k9.tar.gz.
With Cisco that ain’t easy because Cisco requires a Login & Valid Contract. Luckily I found the AnyConnect client on:
https://www.auckland.ac.nz/en/for/current-students/cs-current-pg/cs-current-pg-support/vpn/cs-cisco-vpn-client-for-linux.html At the time of this writing that is: anyconnect-predeploy-linux-64-3.1.04072-k9.tar

[root@alpha ~]# cd Downloads/
[root@alpha Downloads]# tar xvf anyconnect-predeploy-linux-64-3.1.04072-k9.tar

[root@alpha Downloads]# cd anyconnect-3.1.04072/
[root@alpha anyconnect-3.1.04072]# cd vpn
[root@alpha vpn]# ./vpn_install.sh

Start AnyConnect client

[root@alpha vpn]# /opt/cisco/anyconnect/bin/vpnui

AnyConnect says it requires pangox compatibility

/opt/cisco/anyconnect/bin/vpnui: error while loading shared libraries: libpangox-1.0.so.0: cannot open shared object file: No such file or directory

Install pangox-compat (requires EPEL)
[root@alpha Downloads]# wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm
[root@alpha Downloads]# rpm -ivh epel-release-7-8.noarch.rpm
[root@alpha Downloads]# yum -y install pangox-compat
[root@alpha Downloads]# /opt/cisco/anyconnect/bin/vpnui

Setting up a JDBC connection in Glassfish.

Setting up a JDBC connection in Glassfish.

https://computingat40s.wordpress.com/how-to-setup-a-jdbc-connection-in-glassfish/

This is one of that kind of things that can be a challenge for beginners but, at last, is really simple, easy and fast to do. Follow these steps.

  1. Be sure that you Glassfish server is stopped before going on.
  2. Download MySQL JDBC driver from Mysql.com. You can find it here. JDBC driver is called MySQL Connector/J. At the moment I write this article current version is 5.1.25
  3. Unzip and extract all files from the file. Locate the jar containing the JDBC driver. Currently this file is called mysql-connector-java-5.1.25-bin.jar and is located at the root of the folder you have just unzipped.
  4. Copy this jar file to $glassfish_install_folder\glassfish\lib
  5. Start Glassfish and go to the admin console, usually located at http://localhost:4848
  6. At the left side of your console you will see a tree with a node called Resources. Open Resources\JDBC\JDBC Connection Pools. Create a connection pool with the following properties: Pool name: MyDatabase
    Resource type: java.sql.Driver (you can choose any other but by now is the simplest option).
    Database Driver Vendor: MySQL. Click on next. Because you choose database driver vendor MySQL you will have already specified the driver classname (com.mysql.jdbc.Driver).
    Initial and Minimum Pool Size Set a zero value on this parameter. You don’t need initially 8 connections to the database in your development machine.Set the next additional properties:

    URL: jdbc:mysql://localhost:3306/booreg
    user: set the user you want to access this database. Notice that all connection will use the same user.
    password: write the password of your user. Notice that the password is stored unencrypted. You should see a screen similar to these two images:

How to mount partition with ntfs file system and read write access

http://linuxconfig.org/how-to-mount-partition-with-ntfs-file-system-and-read-write-access

1. Introduction

The purpose of this article is to provide the reader with a step-by-step guide on how to mount a partition with the NTFS file system on the Linux operating system. This article consists of two parts:

mount NTFS file system read only access
mount NTFS file system with read write access

 

2. Mount NTFS file system with read only access

2.1. NTFS kernel support

Majority of current Linux distributions supports NTFS file system out of the box. To be more specific, support for NTFS file system is more feature of Linux kernel modules rather than Linux distributions. First verify if we have NTFS modules installed on our system.

ls /lib/modules/2.6.18-5-686/kernel/fs/ | grep ntfs

check for NTFS kernel support

The NTFS module is present. Let's identify the NTFS partition.
2.2. Identifying partition with NTFS file system

One simple way to identify NTFS partition is:

fdisk -l | grep NTFS

Identifying partition with NTFS file system

There it is: /dev/sdb1
2.3. Mount NTFS partition

First create a mount point:

mkdir /mnt/ntfs

Then simply use mount command to mount it:

mount -t ntfs /dev/sdb1 /mnt/ntfs

Mount NTFS partition using linux
Now we can access the NTFS partition and its files with read only access.
3. Mount NTFS file system with read write access

Mounting an NTFS file system with read write access permissions is a bit more complicated. This involves installation of additional software such as fuse and ntfs-3g. In both cases you probably need to use your package management tool such as yum, apt-get, synaptic etc., and install it from your standard distribution repository. Check for the packages ntfs-3g and fuse. We take the other path, which consists of manual compilation and installation of fuse and ntfs-3g from source code.

3.1. Install addition software

3.1.1. Fuse Install

Download source code from: http://fuse.sourceforge.net/ or https://github.com/libfuse/libfuse

wget http://easynews.dl.sourceforge.net/sourceforge/fuse/fuse-2.7.1.tar.gz

Compile and install fuse source code:
Extract source file:

tar xzf fuse-2.7.1.tar.gz

Compile and install

cd fuse-2.7.1
 ./configure --exec-prefix=/; make; make install

Compile and install fuse source code

3.1.2. ntfs-3g install

Download source code from: http://www.ntfs-3g.org/index.html#download

wget http://www.ntfs-3g.org/ntfs-3g-1.1120.tgz

Extract source file:

tar xzf ntfs-3g-1.1120.tgz

Compile and install ntfs-3g source code
NOTE: Make sure that you have pkg-config package installed, otherwise you get this error message:

checking for pkg-config… no
checking for FUSE_MODULE… configure: error: FUSE >= 2.6.0 was not found. Either it’s not fully
installed (e.g. fuse, fuse-utils, libfuse, libfuse2, libfuse-dev, etc packages) or files from an old
version are still present. See FUSE at http://fuse.sf.net/

cd ntfs-3g-1.1120
 ./configure; make; make install

Compile and install ntfs-3g source code

3.2. Mount ntfs partition with read write access

mount -t ntfs-3g /dev/sdb1 /mnt/ntfs/

NOTE: ntfs-3g recommends to have at least kernel version 2.6.20 and higher.

linuxconfig.org~# mount -t ntfs-3g /dev/sdb1 /mnt/ntfs/
WARNING: Deficient Linux kernel detected. Some driver features are
not available (swap file on NTFS, boot from NTFS by LILO), and
unmount is not safe unless it’s made sure the ntfs-3g process
naturally terminates after calling ‘umount’. If you wish this
message to disappear then you should upgrade to at least kernel
version 2.6.20, or request help from your distribution to fix
the kernel problem. The below web page has more information:
http://ntfs-3g.org/support.html#fuse26