Skip to content

Commit

Permalink
Merge pull request #107 from hubblestack/develop
Browse files Browse the repository at this point in the history
Merge to master (prep v2017.8.3)
  • Loading branch information
basepi authored Aug 18, 2017
2 parents 0fca004 + 50a2fb2 commit 6a89ca8
Show file tree
Hide file tree
Showing 69 changed files with 485 additions and 238,128 deletions.
7 changes: 5 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@ https://hubblestack.io

This installation method subscribes directly to our GitHub repository, pinning
to a tag or branch. This method requires no package installation or manual
checkouts.
checkouts. It will also subscribe to our policy repo,
[hubblestack_data](https://github.com/hubblestack/hubblestack_data).

Requirements: GitFS support on your Salt Master. (Usually just requires
installation of `gitpython` or `pygit2`. `pygit2` is the recommended gitfs
Expand All @@ -55,8 +56,10 @@ fileserver_backend:
- roots
- git
gitfs_remotes:
- https://github.com/hubblestack/hubblestack_data.git:
- root: ''
- https://github.com/hubblestack/hubble-salt.git:
- base: v2017.8.2
- base: v2017.8.3
- root: ''
```
Expand Down
2 changes: 1 addition & 1 deletion _beacons/pulsar.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@
DEFAULT_MASK = None

__virtualname__ = 'pulsar'
__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'
CONFIG = None
CONFIG_STALENESS = 0

Expand Down
2 changes: 1 addition & 1 deletion _beacons/win_pulsar.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
DEFAULT_TYPE = 'all'

__virtualname__ = 'pulsar'
__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'
CONFIG = None
CONFIG_STALENESS = 0

Expand Down
2 changes: 1 addition & 1 deletion _modules/hubble.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
from nova_loader import NovaLazyLoader

__nova__ = {}
__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'


def audit(configs=None,
Expand Down
2 changes: 1 addition & 1 deletion _modules/nebula_osquery.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@

log = logging.getLogger(__name__)

__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'
__virtualname__ = 'nebula'


Expand Down
2 changes: 1 addition & 1 deletion _modules/win_pulsar.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
CONFIG = None
CONFIG_STALENESS = 0

__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'


def __virtual__():
Expand Down
2 changes: 1 addition & 1 deletion _returners/slack_pulsar_returner.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@
# Import Salt Libs
import salt.returners

__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'

log = logging.getLogger(__name__)

Expand Down
202 changes: 103 additions & 99 deletions _returners/splunk_nebula_return.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@

import logging

__version__ = 'v2017.8.2'
__version__ = 'v2017.8.3'

_max_content_bytes = 100000
http_event_collector_SSL_verify = False
Expand All @@ -62,106 +62,109 @@


def returner(ret):
    """Ship a hubble nebula (osquery) job return to one or more Splunk
    HTTP event collector endpoints.

    NOTE(review): this span was scraped from a diff view and contained both
    the pre-merge and post-merge bodies of the function concatenated under a
    single ``def`` line; the body below is the merged (v2017.8.3) version,
    which wraps the whole routine in try/except so a returner failure is
    logged instead of breaking the calling job.

    ret -- standard salt job return dict; reads the 'return' (osquery data),
           'id' (minion id) and 'jid' (job id) keys.
    """
    try:
        opts_list = _get_options()

        # Get cloud details (instance/account metadata) to decorate every event
        clouds = get_cloud_details()

        for opts in opts_list:
            logging.info('Options: %s' % json.dumps(opts))
            http_event_collector_key = opts['token']
            http_event_collector_host = opts['indexer']
            http_event_collector_port = opts['port']
            hec_ssl = opts['http_event_server_ssl']
            proxy = opts['proxy']
            timeout = opts['timeout']
            custom_fields = opts['custom_fields']

            # Set up the fields to be extracted at index time. The field
            # values must be strings. Note that these fields will also still
            # be available in the event data.
            index_extracted_fields = ['aws_instance_id', 'aws_account_id', 'azure_vmId']
            try:
                index_extracted_fields.extend(opts['index_extracted_fields'])
            except TypeError:
                # opts['index_extracted_fields'] was None / non-iterable
                pass

            # Set up the collector
            hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_port=http_event_collector_port, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout)

            data = ret['return']
            minion_id = ret['id']
            jid = ret['jid']
            master = __grains__['master']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            try:
                fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                fqdn_ip4 = __grains__['ipv4'][0]
            if fqdn_ip4.startswith('127.'):
                # A loopback address is useless in splunk; prefer the first
                # routable ipv4 address, if any.
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break

            if not data:
                return

            for query in data:
                # py2 codebase: dict.iteritems() — each entry maps a query
                # name to {'data': [row, ...], ...}
                for query_name, query_results in query.iteritems():
                    for query_result in query_results['data']:
                        event = {}
                        payload = {}
                        event.update(query_result)
                        event.update({'query': query_name})
                        event.update({'job_id': jid})
                        event.update({'master': master})
                        event.update({'minion_id': minion_id})
                        event.update({'dest_host': fqdn})
                        event.update({'dest_ip': fqdn_ip4})

                        for cloud in clouds:
                            event.update(cloud)

                        # Pull configured custom fields from minion config;
                        # lists are flattened to comma-separated strings,
                        # other non-str values are silently dropped.
                        for custom_field in custom_fields:
                            custom_field_name = 'custom_' + custom_field
                            custom_field_value = __salt__['config.get'](custom_field, '')
                            if isinstance(custom_field_value, str):
                                event.update({custom_field_name: custom_field_value})
                            elif isinstance(custom_field_value, list):
                                custom_field_value = ','.join(custom_field_value)
                                event.update({custom_field_name: custom_field_value})

                        payload.update({'host': fqdn})
                        payload.update({'index': opts['index']})
                        if opts['add_query_to_sourcetype']:
                            payload.update({'sourcetype': "%s_%s" % (opts['sourcetype'], query_name)})
                        else:
                            payload.update({'sourcetype': opts['sourcetype']})
                        payload.update({'event': event})

                        # Potentially add metadata fields (index-time
                        # extracted fields must be scalar strings):
                        fields = {}
                        for item in index_extracted_fields:
                            if item in payload['event'] and not isinstance(payload['event'][item], (list, dict, tuple)):
                                fields[item] = str(payload['event'][item])
                        if fields:
                            payload.update({'fields': fields})

                        # If the osquery query includes a field called 'time'
                        # it will be checked. If it's within the last year,
                        # it will be used as the eventtime.
                        event_time = query_result.get('time', '')
                        try:
                            if (datetime.fromtimestamp(time.time()) - datetime.fromtimestamp(float(event_time))).days > 365:
                                event_time = ''
                        except Exception:
                            # 'time' missing or not parseable as a float
                            # timestamp; let splunk assign the event time.
                            event_time = ''
                        finally:
                            hec.batchEvent(payload, eventtime=event_time)

            hec.flushBatch()
    except Exception:
        # Never let a returner error break the job; log the traceback.
        log.exception('Error occurred in splunk_nebula_return')
    return


Expand Down Expand Up @@ -202,6 +205,7 @@ def _get_options():
splunk_opts['proxy'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:proxy', {})
splunk_opts['timeout'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:timeout', 9.05)
splunk_opts['index_extracted_fields'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:index_extracted_fields', [])
splunk_opts['port'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:port', '8088')

return [splunk_opts]

Expand Down
Loading

0 comments on commit 6a89ca8

Please sign in to comment.