Commit 0addb301 authored by Botond Botyanszki

Import stuff.

parent 8c93d5e5
#!/bin/sh
SUBJ="/CN=ca/O=nxlog.org/C=HU/ST=state/L=location"
openssl req -x509 -nodes -newkey rsa:2048 -keyout agent-ca-key.pem -out agent-ca.pem -batch -subj "$SUBJ" -config gencert.cnf
#
# OpenSSL example configuration file.
# This is mostly being used for generation of certificate requests.
#
# This definition stops the following lines choking if HOME isn't
# defined.
HOME = .
RANDFILE = $ENV::HOME/.rnd
# Extra OBJECT IDENTIFIER info:
#oid_file = $ENV::HOME/.oid
oid_section = new_oids
# To use this configuration file with the "-extfile" option of the
# "openssl x509" utility, name here the section containing the
# X.509v3 extensions to use:
# extensions =
# (Alternatively, use a configuration file that has only
# X.509v3 extensions in its main [= default] section.)
[ new_oids ]
# We can add new OIDs in here for use by 'ca' and 'req'.
# Add a simple OID like this:
# testoid1=1.2.3.4
# Or use config file substitution like this:
# testoid2=${testoid1}.5.6
####################################################################
[ ca ]
default_ca = CA_default # The default ca section
####################################################################
[ CA_default ]
dir = /tmp # Where everything is kept
certs = $dir # Where the issued certs are kept
crl_dir = $dir # Where the issued crl are kept
database = $dir/index.txt # database index file.
new_certs_dir = $dir # default place for new certs.
certificate = $dir/ca.crt # The CA certificate
serial = $dir/serial # The current serial number
crl = $dir/crl.pem # The current CRL
private_key = $dir/ca.key # The private key
RANDFILE = $dir/.rand # private random number file
x509_extensions = usr_cert # The extensions to add to the cert
default_days = 1 # how long to certify for
default_crl_days= 30 # how long before next CRL
default_md = md5 # which md to use.
preserve = no # keep passed DN ordering
# A few different ways of specifying how similar the request should look
# For type CA, the listed attributes must be the same, and the optional
# and supplied fields are just that :-)
policy = policy_match
# For the CA policy
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
####################################################################
[ req ]
default_bits = 2048
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca # The extensions to add to the self signed cert
# Passwords for private keys; if not present they will be prompted for
# input_password = secret
# output_password = secret
# This sets a mask for permitted string types. There are several options.
# default: PrintableString, T61String, BMPString.
# pkix : PrintableString, BMPString.
# utf8only: only UTF8Strings.
# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
# MASK:XXXX a literal mask value.
# WARNING: current versions of Netscape crash on BMPStrings or UTF8Strings
# so use this option with caution!
string_mask = nombstr
# req_extensions = v3_req # The extensions to add to a certificate request
[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = HU
countryName_min = 2
countryName_max = 2
stateOrProvinceName = State or Province Name (full name)
#stateOrProvinceName_default =
localityName = Locality Name (eg, city)
#localityName_default =
0.organizationName = Organization Name (eg, company)
#0.organizationName_default =
# we can do this but it is not needed normally :-)
#1.organizationName = Second Organization Name (eg, company)
#1.organizationName_default = World Wide Web Pty Ltd
organizationalUnitName = Organizational Unit Name (eg, section)
#organizationalUnitName_default =
commonName = Common Name (eg, your name or your server\'s hostname)
commonName_max = 64
emailAddress = Email Address
#emailAddress_default = cert@nxlog.org
emailAddress_max = 40
# SET-ex3 = SET extension number 3
[ req_attributes ]
challengePassword = A challenge password
challengePassword_min = 4
challengePassword_max = 20
unstructuredName = An optional company name
[ usr_cert ]
# These extensions are added when 'ca' signs a request.
# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.
basicConstraints=CA:FALSE
# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.
nsCertType = server, client
# For an object signing certificate this would be used.
# nsCertType = objsign
# For normal client use this is typical
# nsCertType = client, email
# and for everything including object signing:
# nsCertType = client, email, objsign
# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
# This will be displayed in Netscape's comment listbox.
nsComment = "Temporary log4ensics Certificate"
# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer:always
# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# Copy subject details
# issuerAltName=issuer:copy
#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
nsCertType = server, client
[ v3_ca ]
# Extensions for a typical CA
# PKIX recommendation.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer:always
# This is what PKIX recommends but some broken software chokes on critical
# extensions.
#basicConstraints = critical,CA:true
# So we do this instead.
basicConstraints = CA:true
# Key usage: this is typical for a CA certificate. However since it will
# prevent it being used as a test self-signed certificate it is best
# left out by default.
# keyUsage = cRLSign, keyCertSign
# Some might want this also
# nsCertType = sslCA, emailCA
# Include email address in subject alt name: another PKIX recommendation
# subjectAltName=email:copy
# Copy issuer details
# issuerAltName=issuer:copy
# DER hex encoding of an extension: beware experts only!
# obj=DER:02:03
# Where 'obj' is a standard or added object
# You can even override a supported extension:
# basicConstraints= critical, DER:30:03:01:01:FF
[ crl_ext ]
# CRL extensions.
# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.
# issuerAltName=issuer:copy
authorityKeyIdentifier=keyid:always,issuer:always
#!/bin/sh
SUBJ="/CN=agent-name.exmaple.com/O=nxlog.org/C=HU/ST=state/L=location"
CERTDIR=.
openssl req -new -newkey rsa:2048 -nodes -keyout agent-key.pem -out req.pem -batch -subj "$SUBJ" -config gencert.cnf
openssl x509 -req -days 1 -in req.pem -CA agent-ca.pem -CAkey agent-ca-key.pem -out agent-cert.pem -set_serial 01
rm -f req.pem
[[amazon_s3_support]]
= Amazon S3 integration using (i|o)m_python
NXLog can both send events to and receive events from Amazon S3 cloud object
storage. The NXLog Python modules for input and output (`im_python` and
`om_python`) are used, along with `boto3`, the AWS SDK for Python. More
information about `boto3` can be found at https://aws.amazon.com/sdk-for-python/
[[amazon_s3_config]]
== Configuring boto3
The first step is to install and configure boto3 on your system.
Boto3 can be installed using `pip` or your distribution's package manager.
.Using pip
[source,bash]
----
$ pip install boto3
----
.Using the package manager of a Debian-based distro
[source,bash]
----
# apt-get install python-boto3
----
.Using the package manager of a Red Hat-based distro
[source,bash]
----
# yum install python2-boto3
----
NOTE: The `python2-boto3` package requires the EPEL repository to be installed.
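
A quick import check can verify the installation from any Python shell:

[source,python]
----
import boto3
print(boto3.__version__)  # prints the installed boto3 version
----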
After creating an AWS service account, your local setup requires some
configuration. Two files must be created: one selecting the default region
and one holding your credentials. This can be done interactively if you have
the AWS CLI installed, or manually by editing the files shown below.
Credentials for your AWS account can be found in the IAM Console. You can
create a new user or use an existing one. Go to manage access keys and
generate a new set of keys.
.~/.aws/config
[source,config]
----
[default]
region=eu-central-1
----
.~/.aws/credentials
[source,bash]
----
[default]
aws_access_key_id = YOUR_ACCESS_KEY
aws_secret_access_key = YOUR_SECRET_KEY
----
More information about the initial setup and the credentials can be found
at https://boto3.readthedocs.io/en/latest/guide/quickstart.html
and https://boto3.readthedocs.io/en/latest/guide/configuration.html
NOTE: The region and credentials can also be hardcoded in the code;
however, this is not considered good practice.
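
For reference, here is a minimal sketch of both approaches. The hardcoded
variant mirrors the commented-out lines in the scripts below and is shown
only to illustrate what the note above discourages:

[source,python]
----
import boto3

# Preferred: region and credentials are picked up automatically from
# ~/.aws/config and ~/.aws/credentials.
client = boto3.client('s3')

# Discouraged: hardcoding the region and credentials in the script.
client = boto3.client('s3',
                      region_name='eu-central-1',
                      aws_access_key_id='YOUR_ACCESS_KEY',
                      aws_secret_access_key='YOUR_SECRET_KEY')
----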
[[amazon_s3_explained]]
== AWS S3 Buckets, objects, keys and structure
Both the input and output Python scripts interact with a bucket on Amazon S3.
The scripts will not create, delete, or alter the bucket or any of its
properties, permissions, or management options. It is the responsibility of
the user to create the bucket, set the appropriate permissions (ACL), and
configure any further options such as lifecycle rules, replication,
encryption, and so on. Similarly, the scripts do not alter the storage class
of the stored objects or any other properties or permissions. General
information about Amazon S3 can be found at
https://docs.aws.amazon.com/AmazonS3/latest/gsg/GetStartedWithS3.html
Amazon S3 stores objects inside containers called buckets. There is a finite
number of buckets that you can have and an infinite number of objects that
you can store. We selected a schema where events are stored in a single
bucket and each object key references the server (or service) name, the date,
and the time the event was received. Even though Amazon S3 uses a flat
structure to store objects, objects with similar key prefixes are grouped
together, resembling the structure of a file system. The following is a
visual representation of the naming scheme used; a minimal sketch of the key
derivation follows the listing. Note that the key names at the deepest level
represent time, but Amazon S3 treats the `:` character as special, so we
substitute the `.` character for it to avoid escaping.
* MYBUCKET/
** SERVER01/
*** 2018-05-17/
**** 12.36.34.1
**** 12.36.35.1
*** 2018-05-18/
**** 10.46.34.1
**** 10.46.35.1
**** 10.46.35.2
**** 10.46.36.1
** SERVER02/
*** 2018-05-16/
**** 14.23.12.1
*** 2018-05-17/
**** 17.03.52.1
**** 17.03.52.2
**** 17.03.52.3
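
The key layout above can be reproduced with a few lines of Python. This is a
minimal, hypothetical helper for illustration only (the `make_key()` name is
not part of the scripts); it performs the same `:` to `.` substitution that
`s3_write.py` applies to `EventReceivedTime`:

[source,python]
----
from datetime import datetime

def make_key(server, received, seq):
    # 'received' is an EventReceivedTime string, e.g. '2018-05-17 12:36:34'.
    # The time component uses '.' instead of ':' to avoid escaping issues.
    dt = datetime.strptime(received, "%Y-%m-%d %H:%M:%S")
    return "{0}/{1}/{2}.{3}".format(server, dt.strftime("%Y-%m-%d"),
                                    dt.strftime("%H.%M.%S"), seq)

print(make_key("SERVER01", "2018-05-17 12:36:34", 1))
# prints: SERVER01/2018-05-17/12.36.34.1
----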
[[amazon_s3_output]]
== Storing events to Amazon S3
This section explains the Python script and NXLog configuration needed to
store events in Amazon S3 cloud object storage.
Configure NXLog similarly to the following. For simplicity, we are reading
events from a file.
.nxlog.conf
[source,config]
----
include::../output/nxlog.conf[]
----
The following Python script is used to store events in Amazon S3.
.s3_write.py
[source,python]
----
include::../output/s3_write.py[]
----
Edit the `BUCKET` and `SERVER` variables in the code. Events are stored in
the Amazon S3 bucket with object key names composed of the server name, the
date in YYYY-MM-DD format, and the time in HH.MM.SS format, plus a counter,
since multiple events can be received within the same second.
[[amazon_s3_input]]
== Retrieving events from Amazon S3
This section explains the Python script and NXLog configuration needed to
retrieve events from Amazon S3 cloud object storage.
Configure NXLog similarly to the following. For simplicity, we are saving
events to a file.
.nxlog.conf
[source,config]
----
include::../input/nxlog.conf[]
----
.s3_read.py
[source,python]
----
include::../input/s3_read.py[]
----
Edit the `BUCKET` and `SERVER` variables in the code. `POLL_INTERVAL` is the
time the script waits before checking again for new events. The `MAXKEYS`
variable should be fine in all cases at its default value of 1000 keys. The
script keeps track of the last object retrieved from Amazon S3 by means of a
file called `lastkey.log`, stored locally. Even after an abnormal
termination, the script will continue from where it stopped. You can delete
(or even edit) the `lastkey.log` file to reset that behavior.
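
The resume behavior maps directly onto the `StartAfter` parameter of the S3
`list_objects_v2` API call, which lists keys lexicographically after the one
supplied. A minimal sketch, assuming the bucket and key names from the
listing above:

[source,python]
----
import boto3

client = boto3.client('s3')
# Continue listing right after the last processed key
resp = client.list_objects_v2(Bucket='MYBUCKET', Prefix='SERVER01',
                              MaxKeys=1000,
                              StartAfter='SERVER01/2018-05-17/12.36.34.1')
for obj in resp.get('Contents', []):
    print(obj['Key'])
----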
[[amazon_s3_extras]]
=== Serialization and compression
In the previous examples, only the `raw_event` field was stored in the
objects. An easy way to store more than one field is to "pickle" (also known
as serializing or marshalling) them together. The following lines of Python
code show how to do so for all the fields of an event.
.Pickle events
[source,python]
----
import pickle

# Collect every field of the event into a dictionary and serialize it
fields = {}
for field in event.get_names():
    fields.update({field: event.get_field(field)})
newraw = pickle.dumps(fields)
client.put_object(Body=newraw, Bucket=BUCKET, Key=key)
----
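
On the input side, the reverse operation restores each field before posting
the event. The following is a minimal sketch, assuming the object body was
written with `pickle` as shown above; `client`, `BUCKET`, `key`, and `module`
are the names used in `s3_read.py`:

.Unpickle events
[source,python]
----
import pickle

# Fetch the object and restore the serialized fields onto a new event
raw = client.get_object(Bucket=BUCKET, Key=key)
fields = pickle.loads(raw['Body'].read())
event = module.logdata_new()
for name, value in fields.items():
    event.set_field(name, value)
event.post()
----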
Compressing the events with gzip is also possible. The following Python code
shows how to do so.
.Gzip events
[source,python]
----
import StringIO
import gzip

# Compress the serialized event in memory before uploading
out = StringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
    f.write(newraw)
gzallraw = out.getvalue()
client.put_object(Body=gzallraw, Bucket=BUCKET, Key=key)
----
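
Reading such objects back requires the matching decompression step. A minimal
sketch, again assuming Python 2 (`StringIO`) and the names used in
`s3_read.py`:

.Gunzip events
[source,python]
----
import StringIO
import gzip

# Fetch the object and decompress its body
raw = client.get_object(Bucket=BUCKET, Key=key)
buf = StringIO.StringIO(raw['Body'].read())
with gzip.GzipFile(fileobj=buf, mode="r") as f:
    newraw = f.read()
----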
#!/bin/sh
# Generate HTML format
asciidoctor amazon-s3.adoc
# Generate PDF format
asciidoctor-pdf amazon-s3.adoc
<Input in>
    Module      im_python
    PythonCode  s3_read.py
</Input>
<Output out>
    Module  om_file
    File    "output.log"
</Output>
<Route exec_to_file>
    Path    in => out
</Route>
#!/usr/bin/python

import boto3
import nxlog

BUCKET = 'MYBUCKET'
SERVER = 'MYSERVER'
# 1000 keys is the maximum allowed by the AWS API
MAXKEYS = 1000
POLL_INTERVAL = 30


def save_key(key):
    # Persist the last processed object key so the script can resume
    fo = open('lastkey.log', 'wb', 0)
    fo.write(key)
    fo.close()


def load_key():
    try:
        fo = open('lastkey.log', 'rb', 0)
        key = fo.read()
        fo.close()
        return key
    except IOError:
        return ''


def read_data(module):
    nxlog.log_debug('Checking for new archives')
    # Insert credentials at ~/.aws/credentials
    client = boto3.client('s3')
    # Hardcoding credentials is possible, but not advisable:
    # client = boto3.client('s3', aws_access_key_id='XXXXX', aws_secret_access_key='XXXXXXXX')
    lastkey = load_key()
    keycount = MAXKEYS
    # Keep paging while each listing comes back full (MAXKEYS keys)
    while MAXKEYS == keycount:
        if lastkey == '':
            data = client.list_objects_v2(Bucket=BUCKET, Prefix=SERVER, MaxKeys=MAXKEYS)
        else:
            data = client.list_objects_v2(Bucket=BUCKET, Prefix=SERVER, MaxKeys=MAXKEYS, StartAfter=lastkey)
        keycount = data['KeyCount']
        if keycount > 0:
            for obj in data['Contents']:
                lastkey = obj['Key']
                raw = client.get_object(Bucket=BUCKET, Key=lastkey)
                line = raw['Body'].read().decode('utf-8')
                event = module.logdata_new()
                event.set_field('raw_event', line)
                event.post()
            save_key(lastkey)
    module.set_read_timer(POLL_INTERVAL)
<Input in>
    Module        im_file
    File          "input.log"
    SavePos       FALSE
    ReadFromLast  FALSE
</Input>
<Output out>
    Module      om_python
    PythonCode  s3_write.py
</Output>
<Route exec_to_file>
    Path    in => out
</Route>
#!/usr/bin/python

import nxlog
import boto3
from botocore.exceptions import ClientError
from datetime import datetime

BUCKET = 'MYBUCKET'
SERVER = 'MYSERVER'

lastdt = datetime.now()


def counter(rdtime, v=[0]):
    # The mutable default argument keeps the per-second counter across calls;
    # it is reset whenever a newer EventReceivedTime is seen.
    global lastdt
    if lastdt < rdtime:
        lastdt = rdtime
        v[0] = 0
    v[0] += 1
    return v[0]


def write_data(event):
    nxlog.log_debug('Python alerter received event')
    raw = event.get_field('raw_event')
    rtime = event.get_field('EventReceivedTime')
    dt = datetime.strptime(rtime, "%Y-%m-%d %H:%M:%S")
    # Key format: SERVER/YYYY-MM-DD/HH.MM.SS.counter (':' replaced by '.')
    key = SERVER + '/' + rtime.replace(' ', '/').replace(':', '.', 3) + '.' + str(counter(dt))
    # Insert credentials at ~/.aws/credentials
    client = boto3.client('s3')
    # Hardcoding credentials is possible, but not advisable:
    # client = boto3.client('s3', aws_access_key_id='XXXXX', aws_secret_access_key='XXXXXXXX')
    try:
        client.put_object(Body=raw, Bucket=BUCKET, Key=key)
    except ClientError as e:
        nxlog.log_error("Error: {0}".format(e))
#!/usr/bin/env python

# This is a PoC script (written for Python 2) that can be used with the
# 'om_exec' NXLog module to ship logs to Microsoft Azure Cloud
# (Log Analytics / OMS) via the REST API.
#
# NXLog configuration:
# -------------------
# <Output out>
#     Module   om_exec
#     Command  /tmp/samplepy
# </Output>
# -------------------

import json
import requests
import datetime
import hashlib
import hmac
import base64
import fileinput

# Update the customer ID to your Operations Management Suite workspace ID
customer_id = '<cid>'

# For the shared key, use either the primary or the secondary Connected
# Sources client authentication key
shared_key = "<skey>"

# The log type is the name of the event that is being submitted
log_type = 'STDIN_PY'

#####################
######Functions######
#####################


# Build the API signature
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
    x_headers = 'x-ms-date:' + date
    string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
    bytes_to_hash = bytes(string_to_hash).encode('utf-8')
    decoded_key = base64.b64decode(shared_key)
    encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest())
    authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
    return authorization


# Build and send a request to the POST API
def post_data(customer_id, shared_key, body, log_type):
    method = 'POST'
    content_type = 'application/json'
    resource = '/api/logs'
    rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    content_length = len(body)
    signature = build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
    uri = 'https://' + customer_id + '.ods.opinsights.azure.com' + resource + '?api-version=2016-04-01'
    headers = {
        'content-type': content_type,
        'Authorization': signature,
        'Log-Type': log_type,
        'x-ms-date': rfc1123date
    }
    response = requests.post(uri, data=body, headers=headers)
    if 200 <= response.status_code <= 299:
        print 'Accepted'
    else:
        print "Response code: {}".format(response.status_code)


# Read log lines from stdin (om_exec pipes events here) and POST each one
for body in fileinput.input():
    post_data(customer_id, shared_key, body, log_type)
# DB is saved in /opt/nxsec/var/spool/nxlog/fim-rules/
# NOTE: NXLog must be running as "root" to have read access to
# all files in /etc/
<Input fim-rules>
    Module        im_fim
    Recursive     True
    ScanInterval  86400
    File          "/etc/*"
    File          "/usr/bin/*"
    File          "/usr/sbin/*"
    File          "/bin/*"
    File          "/sbin/*"
    Exclude       "/etc/mtab"
    Exclude       "/etc/mnttab"
    Exclude       "/etc/hosts.deny"
    Exclude       "/etc/mail/statistics"
    Exclude       "/etc/random-seed"
    Exclude       "/etc/adjtime"
    Exclude       "/etc/httpd/logs"
    Exclude       "/etc/utmpx"
    Exclude       "/etc/wtmpx"
    Exclude       "/etc/cups/certs"
    Exclude       "/etc/dumpdates"
    Exclude       "/etc/svc/volatile"
</Input>
<Output out-fim-rules>
    Module  om_file
    Exec    to_json();
    File    '/tmp/fim-rules.log'
</Output>
<Route route1>
    Path    fim-rules => out-fim-rules
</Route>
# DB is saved in /opt/nxsec/var/spool/nxlog/fim-rules/
# NOTE: NXLog must be running as "root" to have read access to
# all files in /etc/
<Input fim-rules>
    Module        im_fim
    Recursive     True
    ScanInterval  86400
    File          "/etc/*"
    File          "/usr/bin/*"
    File          "/usr/sbin/*"
    File          "/bin/*"
    File          "/sbin/*"
    Exclude       "/etc/mtab"
    Exclude       "/etc/hosts.deny"
</Input>
<Output out-fim-rules>
    Module  om_file
    Exec    to_json();
    File    '/tmp/fim-rules.log'
</Output>
<Route route1>
    Path    fim-rules => out-fim-rules
</Route>
# DB is saved in C:\Program Files\nxlog\data\nxlog\fim-rules\ and ~\fim-rules-recursive\
# NOTE: NXLog must be running as "Administrator" to have read access to all files.
<Input fim_rules>
    Module        im_fim
    Recursive     False
    ScanInterval  86400
    File          "C:\Program Files\*"
    File          "C:\Program Files\Internet Explorer\*"
    File          "C:\Program Files\Common Files\*"
    File          "C:\Program Files (x86)\*"
    File          "C:\Program Files (x86)\Common Files\*"
    File          "C:\ProgramData\*"
    File          "C:\Windows\*"
    File          "C:\Windows\System32\*"
    File          "C:\Windows\System32\Drivers\*"