From 92523e8e9bba4766b387933b558fd3950b0f7f53 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 16 Feb 2017 09:50:15 -0700 Subject: [PATCH 01/18] Fix for Debian systems which have an int for os_version Ref https://github.com/hubblestack/nova/pull/299 --- hubblestack_nova/cve_scan_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubblestack_nova/cve_scan_v2.py b/hubblestack_nova/cve_scan_v2.py index 1dc83bf..6e0ac7c 100644 --- a/hubblestack_nova/cve_scan_v2.py +++ b/hubblestack_nova/cve_scan_v2.py @@ -168,7 +168,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): os.remove(cached_zip) extracted_json = os.path.join(__opts__['cachedir'], 'cve_scan_cache', - '%s_%s.json' % (os_name, os_version.replace('.', ''))) + '%s_%s.json' % (os_name, str(os_version).replace('.', ''))) log.debug('attempting to open %s', extracted_json) with open(extracted_json, 'r') as json_file: master_json = json.load(json_file) From a5d63801d81f365c20c2617244031c638dd4e642 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 16 Feb 2017 14:44:34 -0700 Subject: [PATCH 02/18] Add uptime fallback query --- _modules/nebula_osquery.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/_modules/nebula_osquery.py b/_modules/nebula_osquery.py index 50f0a4a..41dd5d5 100644 --- a/_modules/nebula_osquery.py +++ b/_modules/nebula_osquery.py @@ -99,6 +99,15 @@ def queries(query_group, 'result': True }} ) + uptime = __salt__['status.uptime']() + if isinstance(uptime, dict): + uptime = uptime.get('seconds', __salt__['cmd.run']('uptime')) + ret.append( + {'fallback_uptime': { + 'data': [{'uptime': uptime}], + 'result': True + }} + ) if report_version_with_day: ret.append(hubble_versions()) return ret From fb6e9f020d70866dce7465b62dd36c36c32dfc0b Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 16 Feb 2017 14:59:34 -0700 Subject: [PATCH 03/18] Fix for blank hosts when fqdn doesn't return anything --- _returners/splunk_nebula_return.py | 2 
++ _returners/splunk_nova_return.py | 2 ++ _returners/splunk_pulsar_return.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/_returners/splunk_nebula_return.py b/_returners/splunk_nebula_return.py index c2df68f..1f5fa3b 100644 --- a/_returners/splunk_nebula_return.py +++ b/_returners/splunk_nebula_return.py @@ -94,6 +94,8 @@ def returner(ret): jid = ret['jid'] master = __grains__['master'] fqdn = __grains__['fqdn'] + # Sometimes fqdn is blank. If it is, replace it with minion_id + fqdn = fqdn if fqdn else minion_id try: fqdn_ip4 = __grains__['fqdn_ip4'][0] except IndexError: diff --git a/_returners/splunk_nova_return.py b/_returners/splunk_nova_return.py index e5cee5d..6cf9197 100644 --- a/_returners/splunk_nova_return.py +++ b/_returners/splunk_nova_return.py @@ -91,6 +91,8 @@ def returner(ret): minion_id = ret['id'] jid = ret['jid'] fqdn = __grains__['fqdn'] + # Sometimes fqdn is blank. If it is, replace it with minion_id + fqdn = fqdn if fqdn else minion_id master = __grains__['master'] try: fqdn_ip4 = __grains__['fqdn_ip4'][0] diff --git a/_returners/splunk_pulsar_return.py b/_returners/splunk_pulsar_return.py index 63fdd6e..c014e78 100644 --- a/_returners/splunk_pulsar_return.py +++ b/_returners/splunk_pulsar_return.py @@ -98,6 +98,8 @@ def returner(ret): data = _dedupList(data) minion_id = __opts__['id'] fqdn = __grains__['fqdn'] + # Sometimes fqdn is blank. 
If it is, replace it with minion_id + fqdn = fqdn if fqdn else minion_id master = __grains__['master'] try: fqdn_ip4 = __grains__['fqdn_ip4'][0] From 5f96d10ddf602e782ad37534f7414a6f7edd617e Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 16 Feb 2017 15:12:41 -0700 Subject: [PATCH 04/18] Pull changes back upstream from saltless hubble --- _returners/splunk_nebula_return.py | 2 +- _returners/splunk_nova_return.py | 2 +- _returners/splunk_pulsar_return.py | 13 +++++++++++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/_returners/splunk_nebula_return.py b/_returners/splunk_nebula_return.py index 1f5fa3b..5ec25f9 100644 --- a/_returners/splunk_nebula_return.py +++ b/_returners/splunk_nebula_return.py @@ -54,7 +54,7 @@ _max_content_bytes = 100000 http_event_collector_SSL_verify = False -http_event_collector_debug = True +http_event_collector_debug = False log = logging.getLogger(__name__) diff --git a/_returners/splunk_nova_return.py b/_returners/splunk_nova_return.py index 6cf9197..8e374dd 100644 --- a/_returners/splunk_nova_return.py +++ b/_returners/splunk_nova_return.py @@ -54,7 +54,7 @@ _max_content_bytes = 100000 http_event_collector_SSL_verify = False -http_event_collector_debug = True +http_event_collector_debug = False log = logging.getLogger(__name__) diff --git a/_returners/splunk_pulsar_return.py b/_returners/splunk_pulsar_return.py index c014e78..f16ae02 100644 --- a/_returners/splunk_pulsar_return.py +++ b/_returners/splunk_pulsar_return.py @@ -57,7 +57,7 @@ _max_content_bytes = 100000 http_event_collector_SSL_verify = False -http_event_collector_debug = True +http_event_collector_debug = False log = logging.getLogger(__name__) @@ -65,6 +65,9 @@ def returner(ret): + if isinstance(ret, dict) and not ret.get('return'): + # Empty single return, let's not do any setup or anything + return # Customized to split up the change events and send to Splunk. 
opts = _get_options() logging.info('Options: %s' % json.dumps(opts)) @@ -106,8 +109,14 @@ def returner(ret): except IndexError: fqdn_ip4 = __grains__['ipv4'][0] + alerts = [] for item in data: - alert = item['return'] + events = item['return'] + if not isinstance(events, list): + events = [events] + alerts.extend(events) + + for alert in alerts: event = {} payload = {} if('change' in alert): # Linux, normal pulsar From 8480aa91e253c54936b33510b2488f776d3cec34 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Mon, 27 Feb 2017 13:30:55 -0700 Subject: [PATCH 05/18] Add actual content to readme --- README.md | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 054c0d7..e3b5565 100644 --- a/README.md +++ b/README.md @@ -1 +1,30 @@ -# Hubble \ No newline at end of file +# Hubble + +Hubble is a modular, open-source security compliance framework built on top of +SaltStack. The project provides on-demand profile-based auditing, real-time +security event notifications, automated remediation, alerting and reporting. +https://hubblestack.io + +## Installation (GitFS) + +This installation method subscribes directly to our GitHub repository, pinning +to a tag or branch. This method requires no package installation or manual +checkouts. + +Requirements: GitFS support on your Salt Master. (Usually just requires +installation of `gitpython` or `pygit2`. `pygit2` is the recommended gitfs +provider.) + +*/etc/salt/master.d/hubblestack-nova.conf* + +```yaml +fileserver_backend: + - roots + - git +gitfs_remotes: + - https://github.com/hubblestack/hubble-salt.git: + - base: v2017.1.0 + - root: '' +``` + +> Remember to restart the Salt Master after applying this change. 
From 09d8b4403274adeb2442c192b6b65fe5239b1183 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Mon, 27 Feb 2017 13:32:35 -0700 Subject: [PATCH 06/18] Add more installation instructions --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index e3b5565..66faacc 100644 --- a/README.md +++ b/README.md @@ -28,3 +28,9 @@ gitfs_remotes: ``` > Remember to restart the Salt Master after applying this change. + +You can then run `salt '*' saltutil.sync_all` to sync the modules to your +minions. + +See `pillar.example` for sample pillar data for configuring the pulsar beacon +and the splunk/slack returners. From 1caf8e6c6c2fb6ca263927abcd1f2ca4a2f17283 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Tue, 28 Feb 2017 13:01:09 -0700 Subject: [PATCH 07/18] Add latest upstream nova profile changes --- .../cis/centos-7-level-1-scored-v1.yaml | 13 +- .../cis/centos-7-level-1-scored-v2-1-0.yaml | 35 +- .../cis/centos-7-level-1-scored-v2.yaml | 33 +- .../cis/coreos-level-1.yaml | 1064 +++++++++++++++++ .../cis/rhels-6-level-1-scored-v2-0-1.yaml | 13 +- .../cis/rhels-7-level-1-scored-v1.yaml | 41 +- .../cis/rhels-7-level-1-scored-v2-1-0.yaml | 35 +- .../cis/rhelw-7-level-1-scored-v1.yaml | 41 +- .../cis/rhelw-7-level-1-scored-v2-1-0.yaml | 35 +- .../ubuntu-1404-level-1-scored-v1-0-0.yaml | 20 +- .../cis/ubuntu-1404-level-1-scored-v1.yaml | 20 +- hubblestack_nova_profiles/top.nova | 4 +- 12 files changed, 1169 insertions(+), 185 deletions(-) create mode 100644 hubblestack_nova_profiles/cis/coreos-level-1.yaml diff --git a/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v1.yaml b/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v1.yaml index b5f0fb4..21ae3be 100644 --- a/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v1.yaml +++ b/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v1.yaml @@ -434,17 +434,6 @@ stat: user: root description: /etc/anacrontab file be owned by root and must have permissions 600 (Scored) - 
at_allow: - data: - CentOS Linux-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: CIS-6.1.10 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: CentOS Linux-7: @@ -469,7 +458,7 @@ stat: tag: CIS-6.1.11 uid: 0 user: root - - /etc/at/allow: + - /etc/at.allow: gid: 0 group: root mode: 600 diff --git a/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2-1-0.yaml b/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2-1-0.yaml index 04f57ae..104a919 100644 --- a/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2-1-0.yaml +++ b/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2-1-0.yaml @@ -808,34 +808,23 @@ service: - syslog-ng: CIS-4.2.2.1_running description: Ensure syslog-ng service is enabled stat: - at_allow: - data: - CentOS Linux-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: CentOS Linux-7: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/cron.allow: gid: 0 group: root @@ -1085,7 +1074,7 @@ sysctl: - net.ipv4.icmp_ignore_bogus_error_responses: match_output: '1' tag: CIS-3.2.6 - description: Ensure bogus ICMP responses are ignored + description: Ensure bogus ICMP responses are ignored icmp_redirect_acceptance: data: CentOS Linux-7: @@ -1125,7 +1114,7 @@ sysctl: CentOS Linux-7: - net.ipv6.conf.all.accept_ra: match_output: '0' - tag: CIS 3.3.1 + tag: CIS-3.3.1 - net.ipv6.conf.default.accept_ra: match_output: '0' tag: CIS-3.3.1 diff --git a/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2.yaml 
b/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2.yaml index 592594d..3f20519 100644 --- a/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2.yaml +++ b/hubblestack_nova_profiles/cis/centos-7-level-1-scored-v2.yaml @@ -788,34 +788,23 @@ service: CentOS Linux-7: - syslog-ng: CIS-4.2.2.1_running stat: - at_allow: - data: - CentOS Linux-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: CentOS Linux-7: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/cron.allow: gid: 0 group: root @@ -1107,7 +1096,7 @@ sysctl: CentOS Linux-7: - net.ipv6.conf.all.accept_ra: match_output: '0' - tag: CIS 3.3.1 + tag: CIS-3.3.1 - net.ipv6.conf.default.accept_ra: match_output: '0' tag: CIS-3.3.1 diff --git a/hubblestack_nova_profiles/cis/coreos-level-1.yaml b/hubblestack_nova_profiles/cis/coreos-level-1.yaml new file mode 100644 index 0000000..2754a1f --- /dev/null +++ b/hubblestack_nova_profiles/cis/coreos-level-1.yaml @@ -0,0 +1,1064 @@ +grep: + blacklist: + legacy_passwd_entries_group: + data: + *CoreOS*: + - /etc/group: + pattern: '^+:' + tag: CIS-6.2.4 + description: Ensure no legacy "+" entries exist in /etc/group + legacy_passwd_entries_passwd: + data: + *CoreOS*: + - /etc/passwd: + pattern: '^+:' + tag: CIS-6.2.2 + description: Ensure no legacy "+" entries exist in /etc/passwd + legacy_passwd_entries_shadow: + data: + *CoreOS*: + - /etc/shadow: + pattern: '^+:' + tag: CIS-6.2.3 + description: Ensure no legacy "+" entries exist in /etc/shadow + whitelist: + activate_gpg_check: + data: + *CoreOS*: + - /etc/yum.conf: + match_output: gpgcheck=1 + pattern: 
gpgcheck + tag: CIS-1.2.3 + description: Ensure gpgcheck is globally activated + aide_filesystem_scans: + data: + *CoreOS*: + - /etc/cron.d: + pattern: aide + grep_args: + - '-r' + tag: CIS-1.3.2 + description: Ensure filesystem integrity is regularly checked + boot_loader_passwd: + data: + *CoreOS*: + - /etc/grub.conf: + pattern: password + tag: CIS-1.4.2 + description: Ensure bootloader password is set + chargen_disabled: + data: + Red Hat Enterprise Server-7: + - /etc/xinetd.d/chargen-dgram: + pattern: disable + match_output: yes + tag: CIS-2.1.1 + - /etc/xinetd.d/chargen-stream: + pattern: disable + match_output: yes + tag: CIS-2.1.1 + description: Ensure chargen services are not enabled + daytime_disabled: + data: + Red Hat Enterprise Server-7: + - /etc/xinetd.d/daytime-dgram: + pattern: disable + match_output: yes + tag: CIS-2.1.2 + - /etc/xinetd.d/daytime-stream: + pattern: disable + match_output: yes + tag: CIS-2.1.2 + description: Ensure daytime services are not enabled + discard_disabled: + data: + Red Hat Enterprise Server-7: + - /etc/xinetd.d/discard-dgram: + pattern: disable + match_output: yes + tag: CIS-2.1.3 + - /etc/xinetd.d/discard-stream: + pattern: disable + match_output: yes + tag: CIS-2.1.3 + description: Ensure discard services are not enabled + echo_disabled: + data: + Red Hat Enterprise Server-7: + - /etc/xinetd.d/echo-dgram: + pattern: disable + match_output: yes + tag: CIS-2.1.4 + - /etc/xinetd.d/echo-stream: + pattern: disable + match_output: yes + tag: CIS-2.1.4 + description: Ensure echo services are not enabled + time_disabled: + data: + Red Hat Enterprise Server-7: + - /etc/xinetd.d/time-dgram: + pattern: disable + match_output: yes + tag: CIS-2.1.5 + - /etc/xinetd.d/time-stream: + pattern: disable + match_output: yes + tag: CIS-2.1.5 + description: Ensure time services are not enabled + configure_ntp: + data: + *CoreOS*: + - /etc/ntp.conf: + pattern: ^restrict + match_output: default + tag: CIS-2.2.1.2 + - /etc/ntp.conf: + pattern: 
restrict -6 default + tag: CIS-2.2.1.2 + - /etc/ntp.conf: + tag: CIS-2.2.1.2 + pattern: '^server' + - /etc/sysconfig/ntpd: + tag: CIS-2.2.1.2 + pattern: 'ntp:ntp' + description: Ensure ntp is configured + configure_chrony: + data: + *CoreOS*: + - /etc/chrony.conf: + tag: CIS-2.2.1.3 + pattern: '^server' + - /etc/sysconfig/chronyd: + tag: CIS-2.2.1.3 + pattern: 'chrony' + description: Ensure chrony is configured + local_mail: + data: + *CoreOS*: + - /etc/postfix/main.cf: + pattern: ^inet_interfaces + match_output: localhost + tag: CIS-2.2.15 + description: Ensure mail transfer agent is configured for local-only mode + default_umask: + data: + *CoreOS*: + - /etc/bashrc: + pattern: umask + match_pattern: '027' + tag: CIS-5.4.4 + - /etc/profile.d: + pattern: umask + match_pattern: '027' + grep_args: + - '-r' + tag: CIS-5.4.4 + description: Ensure default user umask is 027 or more restrictive + disable_mount_cramfs: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: cramfs + grep_args: + - '-r' + tag: CIS-1.1.1.1 + description: Ensure mounting of cramfs filesystems is disabled + disable_mount_freevxfs: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: freevxfs + grep_args: + - '-r' + tag: CIS-1.1.1.2 + description: Ensure mounting of freevxfs filesystems is disabled + disable_mount_jffs2: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: jffs2 + grep_args: + - '-r' + tag: CIS-1.1.1.3 + description: Ensure mounting of jffs2 filesystems is disabled + disable_mount_hfs: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: hfs + grep_args: + - '-r' + tag: CIS-1.1.1.4 + description: Ensure mounting of hfs filesystems is disabled + disable_mount_hfsplus: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: hfsplus + grep_args: + - '-r' + tag: CIS-1.1.1.5 + description: Ensure mounting of hfsplus filesystems is disabled + disable_mount_squashfs: 
+ data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: squashfs + grep_args: + - '-r' + tag: CIS-1.1.1.6 + description: Ensure mounting of squashfs filesystems is disabled + disable_mount_udf: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: udf + grep_args: + - '-r' + tag: CIS-1.1.1.7 + description: Ensure mounting of udf filesystems is disabled + disable_mount_fat: + data: + *CoreOS*: + - /etc/modprobe.d: + match_output: /bin/true + pattern: vfat + grep_args: + - '-r' + tag: CIS-1.1.1.8 + description: Ensure mounting of FAT filesystems is disabled + fstab_dev_shm_partition_nodev: + data: + *CoreOS*: + - /etc/fstab: + match_output: nodev + pattern: /dev/shm + tag: CIS-1.1.15 + description: Ensure nodev option set on /dev/shm partition + fstab_dev_shm_partition_noexec: + data: + *CoreOS*: + - /etc/fstab: + match_output: noexec + pattern: /dev/shm + tag: CIS-1.1.17 + description: Ensure noexec option set on /dev/shm partition + fstab_dev_shm_partition_nosuid: + data: + *CoreOS*: + - /etc/fstab: + match_output: nosuid + pattern: /dev/shm + tag: CIS-1.1.16 + description: Ensure nosuid option set on /dev/shm partition + fstab_home_partition_nodev: + data: + *CoreOS*: + - /etc/fstab: + match_output: nodev + pattern: /home + tag: CIS-1.1.14 + description: Ensure nodev option set on /home partition + fstab_tmp_partition_nodev: + data: + *CoreOS*: + - /etc/fstab: + match_output: nodev + pattern: /tmp + tag: CIS-1.1.3 + description: Ensure nodev option set on /tmp partition + fstab_tmp_partition_noexec: + data: + *CoreOS*: + - /etc/fstab: + match_output: noexec + pattern: /tmp + tag: CIS-1.1.5 + description: Ensure noexec option set on /tmp partition + fstab_tmp_partition_nosuid: + data: + *CoreOS*: + - /etc/fstab: + match_output: nosuid + pattern: /tmp + tag: CIS-1.1.4 + description: Ensure nosuid option set on /tmp partition + hosts_allow: + data: + *CoreOS*: + - /etc/hosts.allow: + pattern: ALL + tag: CIS-3.4.2 + 
description: Ensure /etc/hosts.allow is configured + hosts_deny: + data: + *CoreOS*: + - /etc/hosts.deny: + pattern: ALL + tag: CIS-3.4.3 + description: Ensure /etc/hosts.deny is configured + firewall_default_deny: + data: + *CoreOS*: + - /etc/sysconfig/iptables: + pattern: :INPUT + match_output: DROP + tag: CIS-3.6.2 + - /etc/sysconfig/iptables: + pattern: :FORWARD + match_pattern: DROP + tag: CIS-3.6.2 + - /etc/sysconfig/iptables: + pattern: :OUTPUT + match_output: DROP + tag: CIS-3.6.2 + description: Ensure default deny firewall policy + firewall_accept_lo: + data: + *CoreOS*: + - /etc/sysconfig/iptables: + pattern: lo + match_output: ACCEPT + tag: CIS-3.6.3 + description: Ensure loopback traffic is configured + rsyslog_file_perms: + data: + *CoreOS*: + - /etc/rsyslog.conf: + pattern: '^\$FileCreateMode' + match_output: '0640' + tag: CIS-4.2.1.3 + description: Ensure rsyslog default file permissions configured + rsyslog_remote_logging: + data: + *CoreOS*: + - /etc/rsyslog.conf: + pattern: ^*.*[^I][^I]*@ + tag: CIS-4.2.1.4 + description: Ensure rsyslog is configured to send logs to a remote log host + syslog-ng_file_perms: + data: + *CoreOS*: + - /etc/syslog-ng/syslog-ng.conf: + pattern: ^options + match_output: 'perm(0640)' + tag: CIS-4.2.2.3 + description: Ensure syslog-ng default file permissions configured + limit_password_reuse: + data: + *CoreOS*: + - /etc/pam.d/system-auth: + pattern: '"^password\s+sufficient\s+pam_unix\.so.*"' + match_output: remember=5 + grep_args: + - '-E' + tag: CIS-5.3.3 + description: Ensure password reuse is limited + password_hash: + data: + *CoreOS*: + - /etc/pam.d/password-auth: + pattern: '"^password\s+\w+\s+pam_unix\.so"' + match_output: sha512 + grep_args: + - '-E' + tag: CIS-5.3.4 + description: Ensure password hashing algorithm is SHA-512 + limit_su_command_access: + data: + *CoreOS*: + - /etc/pam.d/su: + match_output: use_uid + pattern: pam_wheel.so + tag: CIS-5.6 + - /etc/group: + pattern: wheel + tag: CIS-5.6 + 
description: Ensure access to the su command is restricted + pam_pwquality_try_first_pass: + data: + *CoreOS*: + - /etc/pam.d/system-auth: + match_output: try_first_pass + pattern: pam_pwquality.so + tag: CIS-5.3.1 + - /etc/pam.d/system-auth: + match_output: retry=3 + pattern: pam_pwquality.so + tag: CIS-5.3.1 + - /etc/security/pwquality.conf: + pattern: minlen + match_output: '14' + tag: CIS-5.3.1 + - /etc/security/pwquality.conf: + pattern: dcredit + match_output: '-1' + tag: CIS-5.3.1 + - /etc/security/pwquality.conf: + pattern: ucredit + match_output: '-1' + tag: CIS-5.3.1 + - /etc/security/pwquality.conf: + pattern: ocredit + match_output: '-1' + tag: CIS-5.3.1 + - /etc/security/pwquality.conf: + pattern: lcredit + match_output: '-1' + tag: CIS-5.3.1 + description: Ensure password creation requirements are configured + passwd_change_min_days: + data: + *CoreOS*: + - /etc/login.defs: + match_output: '7' + pattern: PASS_MIN_DAYS + tag: CIS-5.4.1.2 + description: Ensure minimum days between password changes is 7 or more + passwd_expiration_days: + data: + *CoreOS*: + - /etc/login.defs: + match_output: '90' + pattern: PASS_MAX_DAYS + tag: CIS-5.4.1.1 + description: Ensure password expiration is 90 days or less + passwd_expiry_warning: + data: + *CoreOS*: + - /etc/login.defs: + match_output: '7' + pattern: PASS_WARN_AGE + tag: CIS-5.4.1.3 + description: Ensure password expiration warning days is 7 or more + passwd_inactive: + data: + *CoreOS*: + - /etc/default/useradd: + pattern: INACTIVE=30 + tag: CIS-5.4.1.4 + description: Ensure inactive password lock is 30 days or less + restrict_core_dumps: + data: + *CoreOS*: + - /etc/security/limits.conf: + match_output: '0' + pattern: hard core + tag: CIS-1.5.1 + description: Ensure core dumps are restricted + rsyslog_remote_logging: + data: + *CoreOS*: + - /etc/rsyslog.conf: + pattern: ^*.*[^I][^I]*@ + tag: CIS-4.2.1.4 + description: Ensure rsyslog is configured to send logs to a remote log host + sshd_approved_cipher: + 
data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: 'aes256-ctr,aes192-ctr,aes128-ctr' + pattern: Ciphers + tag: CIS-5.2.11 + description: Ensure only approved ciphers are used + sshd_banner: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + pattern: ^Banner + tag: CIS-5.2.16 + description: Ensure SSH warning banner is configured + sshd_disable_root_login: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: PermitRootLogin no + pattern: ^PermitRootLogin + tag: CIS-5.2.8 + description: Ensure SSH root login is disabled + sshd_hostbased_auth: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: HostbasedAuthentication no + pattern: ^HostbasedAuthentication + tag: CIS-5.2.7 + description: Ensure SSH HostbasedAuthentication is disabled + sshd_idle_timeout: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: ClientAliveInterval 300 + pattern: ^ClientAliveInterval + tag: CIS-5.2.13 + - /etc/ssh/sshd_config: + match_output: ClientAliveCountMax 0 + pattern: ^ClientAliveCountMax + tag: CIS-5.2.13 + description: Ensure SSH Idle Timeout Interval is configured + sshd_gracetime: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + pattern: ^LoginGraceTime + match_output: '60' + tag: CIS-5.2.14 + description: Ensure SSH LoginGraceTime is set to one minute or less + sshd_ignore_rhosts: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: IgnoreRhosts yes + pattern: ^IgnoreRhosts + tag: CIS-5.2.6 + description: Ensure SSH IgnoreRhosts is enabled + sshd_limit_access: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + pattern: ^AllowUsers + tag: CIS-5.2.15 + - /etc/ssh/sshd_config: + pattern: ^AllowGroups + tag: CIS-5.2.15 + - /etc/ssh/sshd_config: + pattern: ^DenyUsers + tag: CIS-5.2.15 + - /etc/ssh/sshd_config: + pattern: ^DenyGroups + tag: CIS-5.2.15 + description: Ensure SSH access is limited + sshd_loglevel_info: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: LogLevel INFO + pattern: ^LogLevel + tag: CIS-5.2.3 + description: 
Ensure SSH LogLevel is set to INFO + sshd_max_auth_retries: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: MaxAuthTries 4 + pattern: ^MaxAuthTries + tag: CIS-5.2.5 + description: Ensure SSH MaxAuthTries is set to 4 or less + sshd_permit_empty_passwords: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: PermitEmptyPasswords no + pattern: ^PermitEmptyPasswords + tag: CIS-5.2.9 + description: Ensure SSH PermitEmptyPasswords is disabled + sshd_permit_user_environment: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: PermitUserEnvironment no + pattern: ^PermitUserEnvironment + tag: CIS-5.2.10 + description: Ensure SSH PermitUserEnvironment is disabled + sshd_protocol_2: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: Protocol 2 + pattern: ^Protocol + tag: CIS-5.2.2 + description: Ensure SSH Protocol is set to 2 + sshd_x11_forwarding: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + match_output: X11Forwarding no + pattern: ^X11Forwarding + tag: CIS-5.2.4 + description: Ensure SSH X11 forwarding is disabled + lockout_account: + data: + *CoreOS*: + - /etc/pam.d/system-auth: + pattern: '"^auth\s+required\s+pam_faillock\.so.*"' + match_output: deny=5 + grep_args: + - '-E' + tag: CIS-5.3.2 + - /etc/pam.d/password-auth: + pattern: '"^auth\s+required\s+pam_faillock\.so.*"' + match_output: deny=5 + grep_args: + - '-E' + tag: CIS-5.3.2 + description: Ensure lockout for failed password attempts is configured +service: + blacklist: + autofs: + data: + *CoreOS*: + - autofs: CIS-1.1.22 + description: Disable Automounting + rsync: + data: + *CoreOS*: + - rsyncd: CIS-2.2.20 + description: Ensure rsync service is not enabled + nfs: + data: + *CoreOS*: + - nfs: CIS-2.2.7 + description: Ensure NFS and RPC are not enabled + rpc: + data: + *CoreOS*: + - rpcbind: CIS-2.2.7 + description: Ensure NFS and RPC are not enabled + named: + data: + *CoreOS*: + - named: CIS-2.2.8 + description: Ensure DNS Server is not enabled + httpd: + data: + 
*CoreOS*: + - httpd: CIS-2.2.10 + description: Ensure HTTP server is not enabled + pop3_imap: + data: + Red Hat Enterprise LInux Server-7: + - dovecot: CIS-2.2.11 + description: Ensure IMAP and POP3 server is not enabled + samba: + data: + *CoreOS*: + - smb: CIS-2.2.12 + description: Ensure Samba is not enabled + http_proxy: + data: + *CoreOS*: + - squid: CIS-2.2.13 + description: Ensure HTTP Proxy Server is not enabled + snmp: + data: + *CoreOS*: + - snmpd: CIS-2.2.14 + description: Ensure SNMP Server is not enabled + whitelist: + auditd_running: + data: + *CoreOS*: + - auditd: CIS-4.1.1.1_running + description: auditd should be running + crond_running: + data: + *CoreOS*: + - crond: CIS-5.1.1_running + description: Ensure cron daemon is enabled + iptables_running: + data: + *CoreOS*: + - iptables: CIS-3.6.1_running + description: Ensure iptables is installed + rsyslogd_running: + data: + *CoreOS*: + - rsyslog: CIS-4.2.1.1_running + description: Ensure rsyslog Service is enabled + syslog-ng_running: + data: + *CoreOS*: + - syslog-ng: CIS-4.2.2.1_running + description: Ensure syslog-ng service is enabled +stat: + at_allow: + data: + *CoreOS*: + - /etc/at.allow: + gid: 0 + group: root + mode: 600 + tag: CIS-5.1.8 + uid: 0 + user: root + description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) + at_cron_allow: + data: + *CoreOS*: + - /etc/cron.deny: + gid: 0 + group: root + mode: 600 + tag: CIS-5.1.8 + uid: 0 + user: root + - /etc/at.deny: + gid: 0 + group: root + mode: 600 + tag: CIS-5.1.8 + uid: 0 + user: root + - /etc/cron.allow: + gid: 0 + group: root + mode: 600 + tag: CIS-5.1.8 + uid: 0 + user: root + - /etc/at.allow: + gid: 0 + group: root + mode: 600 + tag: CIS-5.1.8 + uid: 0 + user: root + description: Ensure at/cron is restricted to authorized users + cron_d: + data: + *CoreOS*: + - /etc/cron.d: + gid: 0 + group: root + mode: 700 + tag: CIS-5.1.7 + uid: 0 + user: root + description: Ensure permissions on /etc/cron.d are configured 
+ cron_daily: + data: + *CoreOS*: + - /etc/cron.daily: + gid: 0 + group: root + mode: 700 + tag: CIS-5.1.4 + uid: 0 + user: root + description: Ensure permissions on /etc/cron.daily are configured + cron_hourly: + data: + *CoreOS*: + - /etc/cron.hourly: + gid: 0 + group: root + mode: 700 + tag: CIS-5.1.3 + uid: 0 + user: root + description: Ensure permissions on /etc/cron.hourly are configured + cron_monthly: + data: + *CoreOS*: + - /etc/cron.monthly: + gid: 0 + group: root + mode: 700 + tag: CIS-5.1.6 + uid: 0 + user: root + description: Ensure permissions on /etc/cron.monthly are configured + cron_weekly: + data: + *CoreOS*: + - /etc/cron.weekly: + gid: 0 + group: root + mode: 700 + tag: CIS-5.1.5 + uid: 0 + user: root + description: Ensure permissions on /etc/cron.weekly are configured + crontab: + data: + *CoreOS*: + - /etc/crontab: + gid: 0 + group: root + mode: 600 + tag: CIS-5.1.2 + uid: 0 + user: root + description: Ensure permissions on /etc/crontab are configured + passwd_own_perm: + data: + *CoreOS*: + - /etc/passwd: + group: root + tag: CIS-6.1.2 + uid: 0 + user: root + description: Ensure permissions on /etc/passwd are configured + shadow_own_perm: + data: + *CoreOS*: + - /etc/shadow: + gid: 0 + group: root + mode: 000 + tag: CIS-6.1.3 + uid: 0 + user: root + description: Ensure permissions on /etc/shadow are configured + group_own_perm: + data: + *CoreOS*: + - /etc/group: + gid: 0 + group: root + mode: 644 + tag: CIS-6.1.4 + uid: 0 + user: root + description: Ensure permissions on /etc/group are configured + gshadow_own_perm: + data: + *CoreOS*: + - /etc/gshadow: + gid: 0 + group: root + mode: 0 + tag: CIS-6.1.5 + uid: 0 + user: root + description: Ensure permissions on /etc/gshadow are configured + passwd-_own_perm: + data: + *CoreOS*: + - /etc/passwd-: + group: root + tag: CIS-6.1.6 + uid: 0 + user: root + description: Ensure permissions on /etc/passwd- are configured + shadow-_own_perm: + data: + *CoreOS*: + - /etc/shadow-: + gid: 0 + group: root + 
mode: 000 + tag: CIS-6.1.7 + uid: 0 + user: root + description: Ensure permissions on /etc/shadow- are configured + group-_own_perm: + data: + *CoreOS*: + - /etc/group-: + gid: 0 + group: root + mode: 644 + tag: CIS-6.1.8 + uid: 0 + user: root + description: Ensure permissions on /etc/group- are configured + gshadow-_own_perm: + data: + *CoreOS*: + - /etc/gshadow-: + gid: 0 + group: root + mode: 0 + tag: CIS-6.1.9 + uid: 0 + user: root + description: Ensure permissions on /etc/gshadow- are configured + grub_conf_own_perm: + data: + *CoreOS*: + - /etc/grub.conf: + gid: 0 + group: root + mode: 600 + tag: CIS-1.4.1 + uid: 0 + user: root + description: Ensure permissions on bootloader config are configured + hosts_allow: + data: + *CoreOS*: + - /etc/hosts.allow: + gid: 0 + group: root + mode: 644 + tag: CIS-3.4.4 + uid: 0 + user: root + description: Ensure permissions on /etc/hosts.allow are configured + hosts_deny: + data: + *CoreOS*: + - /etc/hosts.deny: + gid: 0 + group: root + mode: 644 + tag: CIS-3.4.5 + uid: 0 + user: root + description: Ensure permissions on /etc/hosts.deny are 644 + sshd_config: + data: + *CoreOS*: + - /etc/ssh/sshd_config: + gid: 0 + group: root + mode: 600 + tag: CIS-5.2.1 + uid: 0 + user: root + description: Ensure permissions on /etc/ssh/sshd_config are configured + warning_banner_motd: + data: + *CoreOS*: + - /etc/motd: + gid: 0 + group: root + mode: 644 + tag: CIS-1.7.1.4 + uid: 0 + user: root + description: Ensure permissions on /etc/motd are configured + warning_banner_issue: + data: + *CoreOS*: + - /etc/issue: + gid: 0 + group: root + mode: 644 + tag: CIS-1.7.1.5 + uid: 0 + user: root + description: Ensure permissions on /etc/issue are configured + warning_banner_issue.net: + data: + *CoreOS*: + - /etc/issue.net: + gid: 0 + group: root + mode: 644 + tag: CIS-1.7.1.6 + uid: 0 + user: root + description: Ensure permissions on /etc/issue.net are configured +sysctl: + bad_error_message_protection: + data: + *CoreOS*: + - 
net.ipv4.icmp_ignore_bogus_error_responses: + match_output: '1' + tag: CIS-3.2.6 + description: Ensure bogus ICMP responses are ignored + icmp_redirect_acceptance: + data: + *CoreOS*: + - net.ipv4.conf.all.accept_redirects: + match_output: '0' + tag: CIS-3.2.2 + - net.ipv4.conf.default.accept_redirects: + match_output: '0' + tag: CIS-3.2.2 + description: Ensure ICMP redirects are not accepted + ignore_broadcast_requests: + data: + *CoreOS*: + - net.ipv4.icmp_echo_ignore_broadcasts: + match_output: '1' + tag: CIS-3.2.5 + description: Ensure broadcast ICMP requests are ignored + ip_forwarding: + data: + *CoreOS*: + - net.ipv4.ip_forward: + match_output: '0' + tag: CIS-3.1.1 + description: Ensure IP forwarding is disabled + log_suspicious_packets: + data: + *CoreOS*: + - net.ipv4.conf.all.log_martians: + match_output: '1' + tag: CIS-3.2.4 + - net.ipv4.conf.default.log_martians: + match_output: '1' + tag: CIS-3.2.4 + description: Ensure suspicious packets are logged + ipv6_adverts: + data: + *CoreOS*: + - net.ipv6.conf.all.accept_ra: + match_output: '0' + tag: CIS-3.3.1 + - net.ipv6.conf.default.accept_ra: + match_output: '0' + tag: CIS-3.3.1 + description: Ensure IPv6 router advertisements are not accepted + ipv6_redir: + data: + *CoreOS*: + - net.ipv6.conf.all.accept_redirects: + match_output: '0' + tag: CIS-3.3.2 + - net.ipv6.conf.default.accept_redirects: + match_output: '0' + tag: CIS-3.3.2 + description: Ensure IPv6 redirects are not accepted + randomize_va_space: + data: + *CoreOS*: + - kernel.randomize_va_space: + match_output: '2' + tag: CIS-1.5.3 + description: Ensure address space layout randomization (ASLR) is enabled + restrict_suid_core_dumps: + data: + *CoreOS*: + - fs.suid_dumpable: + match_output: '0' + tag: CIS-1.5.1 + description: Ensure core dumps are restricted + secure_icmp_redirect_acceptance: + data: + *CoreOS*: + - net.ipv4.conf.all.secure_redirects: + match_output: '0' + tag: CIS-3.2.3 + - net.ipv4.conf.default.secure_redirects: + 
match_output: '0' + tag: CIS-3.2.3 + description: Ensure secure ICMP redirects are not accepted + send_packet_redirect: + data: + *CoreOS*: + - net.ipv4.conf.all.send_redirects: + match_output: '0' + tag: CIS-3.1.2 + - net.ipv4.conf.default.send_redirects: + match_output: '0' + tag: CIS-3.1.2 + description: Ensure packet redirect sending is disabled + source_routed_packet_acceptance: + data: + *CoreOS*: + - net.ipv4.conf.all.accept_source_route: + match_output: '0' + tag: CIS-3.2.1 + - net.ipv4.conf.default.accept_source_route: + match_output: '0' + tag: CIS-3.2.1 + description: Ensure source routed packets are not accepted + tcp_syn_cookies: + data: + *CoreOS*: + - net.ipv4.tcp_syncookies: + match_output: '1' + tag: CIS-3.2.8 + description: Ensure TCP SYN Cookies is enabled diff --git a/hubblestack_nova_profiles/cis/rhels-6-level-1-scored-v2-0-1.yaml b/hubblestack_nova_profiles/cis/rhels-6-level-1-scored-v2-0-1.yaml index 2622b6f..c011d54 100644 --- a/hubblestack_nova_profiles/cis/rhels-6-level-1-scored-v2-0-1.yaml +++ b/hubblestack_nova_profiles/cis/rhels-6-level-1-scored-v2-0-1.yaml @@ -472,15 +472,10 @@ pkg: description: Ensure rsyslog or syslog-ng is installed service: whitelist: - anacron_running: - data: - Red Hat Enterprise Linux Server-6: - - cronie-anacron: CIS-6.1.1_running - description: anacron should be running auditd_running: data: Red Hat Enterprise Linux Server-6: - - auditd: CIS-5.2_running + - auditd: CIS-4.1.2_running description: auditd should be running crond_running: data: @@ -490,12 +485,12 @@ service: iptables_running: data: Red Hat Enterprise Linux Server-6: - - iptables: CIS-4.7_running - description: + - iptables: CIS-3.6.1_running + description: iptables should be running rsyslogd_running: data: Red Hat Enterprise Linux Server-6: - - rsyslogd: CIS-5.1.2_running + - rsyslogd: CIS-4.2.1_running description: rsyslogd should be running stat: anacrontab: diff --git a/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v1.yaml 
b/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v1.yaml index eda4551..3557038 100644 --- a/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v1.yaml +++ b/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v1.yaml @@ -787,46 +787,35 @@ service: Red Hat Enterprise Linux Server-7: - syslog-ng: CIS-4.2.2.1_running stat: - at_allow: - data: - Red Hat Enterprise Linux Server-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: Red Hat Enterprise Linux Server-7: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root + gid: null + group: null + mode: null + tag: CIS-6.1.11 + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root + gid: null + group: null + mode: null + tag: CIS-6.1.11 + uid: null + user: null - /etc/cron.allow: gid: 0 group: root mode: 600 - tag: CIS-5.1.8 + tag: CIS-6.1.11 uid: 0 user: root - /etc/at.allow: gid: 0 group: root mode: 600 - tag: CIS-5.1.8 + tag: CIS-6.1.11 uid: 0 user: root description: Restrict at/cron to authorized users (Scored) @@ -1106,7 +1095,7 @@ sysctl: Red Hat Enterprise Linux Server-7: - net.ipv6.conf.all.accept_ra: match_output: '0' - tag: CIS 3.3.1 + tag: CIS-3.3.1 - net.ipv6.conf.default.accept_ra: match_output: '0' tag: CIS-3.3.1 diff --git a/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v2-1-0.yaml b/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v2-1-0.yaml index 113be17..c0e3e04 100644 --- a/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v2-1-0.yaml +++ b/hubblestack_nova_profiles/cis/rhels-7-level-1-scored-v2-1-0.yaml @@ -808,34 +808,23 @@ service: - syslog-ng: CIS-4.2.2.1_running description: Ensure syslog-ng service is enabled stat: - at_allow: - data: - Red Hat Enterprise Linux Server-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: 
CIS-5.1.8 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: Red Hat Enterprise Linux Server-7: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/cron.allow: gid: 0 group: root @@ -1085,7 +1074,7 @@ sysctl: - net.ipv4.icmp_ignore_bogus_error_responses: match_output: '1' tag: CIS-3.2.6 - description: Ensure bogus ICMP responses are ignored + description: Ensure bogus ICMP responses are ignored icmp_redirect_acceptance: data: Red Hat Enterprise Linux Server-7: @@ -1125,7 +1114,7 @@ sysctl: Red Hat Enterprise Linux Server-7: - net.ipv6.conf.all.accept_ra: match_output: '0' - tag: CIS 3.3.1 + tag: CIS-3.3.1 - net.ipv6.conf.default.accept_ra: match_output: '0' tag: CIS-3.3.1 diff --git a/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v1.yaml b/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v1.yaml index 3e72282..98f60f7 100644 --- a/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v1.yaml +++ b/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v1.yaml @@ -767,46 +767,35 @@ service: Red Hat Enterprise Linux Workstation-7: - syslog-ng: CIS-4.2.2.1_running stat: - at_allow: - data: - Red Hat Enterprise Linux Workstation-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: Red Hat Enterprise Linux Workstation-7: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root + gid: null + group: null + mode: null + tag: CIS-6.1.11 + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root + 
gid: null + group: null + mode: null + tag: CIS-6.1.11 + uid: null + user: null - /etc/cron.allow: gid: 0 group: root mode: 600 - tag: CIS-5.1.8 + tag: CIS-6.1.11 uid: 0 user: root - /etc/at.allow: gid: 0 group: root mode: 600 - tag: CIS-5.1.8 + tag: CIS-6.1.11 uid: 0 user: root description: Restrict at/cron to authorized users (Scored) @@ -1086,7 +1075,7 @@ sysctl: Red Hat Enterprise Linux Workstation-7: - net.ipv6.conf.all.accept_ra: match_output: '0' - tag: CIS 3.3.1 + tag: CIS-3.3.1 - net.ipv6.conf.default.accept_ra: match_output: '0' tag: CIS-3.3.1 diff --git a/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v2-1-0.yaml b/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v2-1-0.yaml index 1fc3ee8..5f4a038 100644 --- a/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v2-1-0.yaml +++ b/hubblestack_nova_profiles/cis/rhelw-7-level-1-scored-v2-1-0.yaml @@ -788,34 +788,23 @@ service: - syslog-ng: CIS-4.2.2.1_running description: Ensure syslog-ng service is enabled stat: - at_allow: - data: - Red Hat Enterprise Linux Workstation-7: - - /etc/at.allow: - gid: 0 - group: root - mode: 600 - tag: CIS-5.1.8 - uid: 0 - user: root - description: /etc/at.allow must be owned by root and have persmissions 600 (Scored) at_cron_allow: data: Red Hat Enterprise Linux Workstation-7: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-5.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/cron.allow: gid: 0 group: root @@ -1065,7 +1054,7 @@ sysctl: - net.ipv4.icmp_ignore_bogus_error_responses: match_output: '1' tag: CIS-3.2.6 - description: Ensure bogus ICMP responses are ignored + description: Ensure bogus ICMP responses are ignored icmp_redirect_acceptance: data: Red Hat Enterprise Linux Workstation-7: @@ -1105,7 +1094,7 @@ sysctl: Red Hat Enterprise Linux 
Workstation-7: - net.ipv6.conf.all.accept_ra: match_output: '0' - tag: CIS 3.3.1 + tag: CIS-3.3.1 - net.ipv6.conf.default.accept_ra: match_output: '0' tag: CIS-3.3.1 diff --git a/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1-0-0.yaml b/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1-0-0.yaml index b1b2881..fd794e7 100644 --- a/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1-0-0.yaml +++ b/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1-0-0.yaml @@ -788,19 +788,19 @@ stat: data: Ubuntu-14.04: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-9.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-9.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/cron.allow: gid: 0 group: root diff --git a/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1.yaml b/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1.yaml index 31f043a..2ad5439 100644 --- a/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1.yaml +++ b/hubblestack_nova_profiles/cis/ubuntu-1404-level-1-scored-v1.yaml @@ -788,19 +788,19 @@ stat: data: Ubuntu-14.04: - /etc/cron.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-9.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/at.deny: - gid: 0 - group: root - mode: 600 + gid: null + group: null + mode: null tag: CIS-9.1.8 - uid: 0 - user: root + uid: null + user: null - /etc/cron.allow: gid: 0 group: root diff --git a/hubblestack_nova_profiles/top.nova b/hubblestack_nova_profiles/top.nova index 247903a..6f1963a 100644 --- a/hubblestack_nova_profiles/top.nova +++ b/hubblestack_nova_profiles/top.nova @@ -3,8 +3,10 @@ # Subscribes to CIS, cve_scan, and misc.yaml for miscellaneous checks nova: - 'G@kernel:Linux': + 'G@kernel:Linux and not G@osfinger:*CoreOS*': - cve.scan-v2 + 'G@osfinger:*CoreOS*': + 
- cis.coreos-level-1 'G@osfinger:CentOS-6': - cis.centos-6-level-1-scored-v2-0-1 'G@osfinger:CentOS*Linux-7': From 40b52bc840489f04a1d6546a4e1aa730c6cc1fc3 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Tue, 28 Feb 2017 13:14:49 -0700 Subject: [PATCH 08/18] Pull in latest query changes from upstream hubblestack_data --- hubblestack_nebula/hubblestack_nebula_queries.yaml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hubblestack_nebula/hubblestack_nebula_queries.yaml b/hubblestack_nebula/hubblestack_nebula_queries.yaml index 41c5815..4f2e950 100644 --- a/hubblestack_nebula/hubblestack_nebula_queries.yaml +++ b/hubblestack_nebula/hubblestack_nebula_queries.yaml @@ -1,12 +1,10 @@ fifteen_min: - query_name: running_procs - query: SELECT p.name AS process, p.pid AS process_id, p.cmdline, p.cwd, p.on_disk, p.resident_size AS mem_used, p.parent, g.groupname, u.username AS user, p.path, h.md5, h.sha1, h.sha256 FROM processes AS p LEFT JOIN users AS u ON p.uid=u.uid LEFT JOIN groups AS g ON p.gid=g.gid LEFT JOIN hash AS h ON p.path=h.path WHERE parent IS NOT 2 AND username NOTNULL AND process NOTNULL AND parent NOTNULL; + query: SELECT p.name AS process, p.pid AS process_id, p.cmdline, p.cwd, p.on_disk, p.resident_size AS mem_used, p.parent, g.groupname, u.username AS user, eu.username AS effective_username, eg.groupname AS effective_groupname, p.path, h.md5, h.sha1, h.sha256 FROM processes AS p LEFT JOIN users AS u ON p.uid=u.uid LEFT JOIN users AS eu ON p.euid=eu.uid LEFT JOIN groups AS g ON p.gid=g.gid LEFT JOIN groups AS eg ON p.gid=eg.gid LEFT JOIN hash AS h ON p.path=h.path WHERE parent IS NOT 2 AND (process NOTNULL OR parent NOTNULL); - query_name: established_outbound - query: SELECT t.iso_8601 AS _time, pos.family, h.*, ltrim(pos.local_address, ':f') AS src, pos.local_port AS src_port, pos.remote_port AS dest_port, ltrim(remote_address, ':f') AS dest, name, p.path AS file_path, cmdline, pos.protocol, lp.protocol FROM 
process_open_sockets AS pos JOIN processes AS p ON p.pid=pos.pid LEFT JOIN time AS t LEFT JOIN (SELECT * FROM listening_ports) AS lp ON lp.port=pos.local_port AND lp.protocol=pos.protocol LEFT JOIN hash AS h ON h.path=p.path WHERE NOT remote_address='' AND NOT remote_address='::' AND NOT remote_address='0.0.0.0' AND NOT remote_address='127.0.0.1' AND port is NULL; + query: SELECT t.iso_8601 AS _time, pos.family, h.*, ltrim(pos.local_address, ':f') AS src_connection_ip, pos.local_port AS src_connection_port, pos.remote_port AS dest_connection_port, ltrim(remote_address, ':f') AS dest_connection_ip, name, p.path AS file_path, cmdline, pos.protocol FROM process_open_sockets AS pos JOIN processes AS p ON p.pid=pos.pid LEFT JOIN time AS t LEFT JOIN (SELECT * FROM listening_ports) AS lp ON lp.port=pos.local_port AND lp.protocol=pos.protocol LEFT JOIN hash AS h ON h.path=p.path WHERE NOT remote_address='' AND NOT remote_address='::' AND NOT remote_address='0.0.0.0' AND NOT remote_address='127.0.0.1' AND port is NULL; - query_name: listening_procs - query: SELECT t.iso_8601 AS _time, h.md5 AS md5, p.pid, name, ltrim(address, ':f') AS address, port, p.path AS file_path, cmdline, root, parent FROM listening_ports AS lp LEFT JOIN processes AS p ON lp.pid=p.pid LEFT JOIN time AS t LEFT JOIN hash AS h ON h.path=p.path WHERE NOT address='127.0.0.1'; - - query_name: suid_binaries - query: SELECT sb.*, t.iso_8601 AS _time, h.sha1, h.sha256 FROM suid_bin AS sb JOIN time AS t LEFT JOIN hash AS h ON sb.path=h.path; + query: SELECT t.iso_8601 AS _time, h.md5 AS md5, p.pid AS process_id, name AS process, ltrim(address, ':f') AS address, port AS dest_port, p.path AS file_path, cmdline, p.on_disk, root, parent, CASE lp.protocol WHEN 6 THEN 'tcp' WHEN 17 THEN 'udp' ELSE lp.protocol END as transport FROM listening_ports AS lp LEFT JOIN processes AS p ON lp.pid=p.pid LEFT JOIN time AS t LEFT JOIN hash AS h ON h.path=p.path; hour: - query_name: crontab query: SELECT c.*,t.iso_8601 AS _time 
FROM crontab AS c JOIN time AS t; @@ -17,3 +15,7 @@ day: query: select * from os_version; - query_name: interface_addresses query: SELECT interface, address FROM interface_addresses WHERE NOT interface='lo'; + - query_name: uptime + query: SELECT total_seconds AS uptime FROM uptime; + - query_name: suid_binaries + query: SELECT sb.*, t.iso_8601 AS _time, h.sha1, h.sha256 FROM suid_bin AS sb JOIN time AS t LEFT JOIN hash AS h ON sb.path=h.path; From daa76d0e93953501c7107a9ae8e58c98cc543f42 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Tue, 28 Feb 2017 13:17:02 -0700 Subject: [PATCH 09/18] Add osrelease to fallback_osfinger --- _modules/nebula_osquery.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/_modules/nebula_osquery.py b/_modules/nebula_osquery.py index 41dd5d5..b90a6fd 100644 --- a/_modules/nebula_osquery.py +++ b/_modules/nebula_osquery.py @@ -88,7 +88,8 @@ def queries(query_group, ret = [] ret.append( {'fallback_osfinger': { - 'data': [{'osfinger': __grains__.get('osfinger', __grains__.get('osfullname'))}], + 'data': [{'osfinger': __grains__.get('osfinger', __grains__.get('osfullname')), + 'osrelease': __grains__.get('osrelease', __grains__.get('lsb_distrib_release'))}], 'result': True }} ) From 23d18bf2cebd41b3dff0041ae92b4f42d4e6eff6 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 2 Mar 2017 09:15:00 -0700 Subject: [PATCH 10/18] Add aws_account_id to all returners https://github.com/hubblestack/quasar/pull/49 --- _returners/splunk_nebula_return.py | 6 +++++- _returners/splunk_nova_return.py | 6 +++++- _returners/splunk_pulsar_return.py | 8 ++++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/_returners/splunk_nebula_return.py b/_returners/splunk_nebula_return.py index 5ec25f9..17e78d8 100644 --- a/_returners/splunk_nebula_return.py +++ b/_returners/splunk_nebula_return.py @@ -76,12 +76,15 @@ def returner(ret): # Gather amazon information if present aws_ami_id = None aws_instance_id = None + 
aws_account_id = None try: aws_ami_id = requests.get('http://169.254.169.254/latest/meta-data/ami-id', timeout=1).text aws_instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=1).text - except requests.exceptions.ConnectTimeout: + aws_account_id = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', + timeout=1).json().get('accountId', 'unknown') + except (requests.exceptions.ConnectTimeout, ValueError): # Not on an AWS box pass @@ -120,6 +123,7 @@ def returner(ret): if aws_instance_id is not None: event.update({'aws_ami_id': aws_ami_id}) event.update({'aws_instance_id': aws_instance_id}) + event.update({'aws_account_id': aws_account_id}) for custom_field in custom_fields: custom_field_name = 'custom_' + custom_field diff --git a/_returners/splunk_nova_return.py b/_returners/splunk_nova_return.py index 8e374dd..09cb23a 100644 --- a/_returners/splunk_nova_return.py +++ b/_returners/splunk_nova_return.py @@ -75,12 +75,15 @@ def returner(ret): # Gather amazon information if present aws_ami_id = None aws_instance_id = None + aws_account_id = None try: aws_ami_id = requests.get('http://169.254.169.254/latest/meta-data/ami-id', timeout=1).text aws_instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=1).text - except requests.exceptions.ConnectTimeout: + aws_account_id = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', + timeout=1).json().get('accountId', 'unknown') + except (requests.exceptions.ConnectTimeout, ValueError): # Not on an AWS box pass @@ -162,6 +165,7 @@ def returner(ret): if aws_instance_id is not None: event.update({'aws_ami_id': aws_ami_id}) event.update({'aws_instance_id': aws_instance_id}) + event.update({'aws_account_id': aws_account_id}) for custom_field in custom_fields: custom_field_name = 'custom_' + custom_field diff --git a/_returners/splunk_pulsar_return.py b/_returners/splunk_pulsar_return.py index 
f16ae02..a4b68e8 100644 --- a/_returners/splunk_pulsar_return.py +++ b/_returners/splunk_pulsar_return.py @@ -81,12 +81,15 @@ def returner(ret): # Gather amazon information if present aws_ami_id = None aws_instance_id = None + aws_account_id = None try: aws_ami_id = requests.get('http://169.254.169.254/latest/meta-data/ami-id', timeout=1).text aws_instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id', timeout=1).text - except requests.exceptions.ConnectTimeout: + aws_account_id = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', + timeout=1).json().get('accountId', 'unknown') + except (requests.exceptions.ConnectTimeout, ValueError): # Not on an AWS box pass @@ -166,7 +169,7 @@ def returner(ret): event['file_hash'] = alert['checksum'] event['file_hash_type'] = alert['checksum_type'] - else: # Windows, win_pulsar + else: # Windows, win_pulsar change = alert['Accesses'] if alert['Hash'] == 'Item is a directory': object_type = 'directory' @@ -213,6 +216,7 @@ def returner(ret): if aws_instance_id is not None: event.update({'aws_ami_id': aws_ami_id}) event.update({'aws_instance_id': aws_instance_id}) + event.update({'aws_account_id': aws_account_id}) for custom_field in custom_fields: custom_field_name = 'custom_' + custom_field From bf1d88936815878ccaa6b95ebfda25969c31979c Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 2 Mar 2017 09:19:10 -0700 Subject: [PATCH 11/18] Handle empty return This is only necessary in "saltless" hubble, but I'm putting it here to minimize the differences between the versions --- _returners/slack_pulsar_returner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/_returners/slack_pulsar_returner.py b/_returners/slack_pulsar_returner.py index a311c01..fa68adf 100644 --- a/_returners/slack_pulsar_returner.py +++ b/_returners/slack_pulsar_returner.py @@ -278,11 +278,11 @@ def returner(ret): log.error('slack_pulsar.api_key not defined in salt config') return - 
if isinstance(ret, dict): + if ret and isinstance(ret, dict): message = ('id: {0}\r\n' 'return: {1}\r\n').format(__opts__['id'], pprint.pformat(ret.get('return'))) - elif isinstance(ret, list): + elif ret and isinstance(ret, list): message = 'id: {0}\r\n' for r in ret: message += pprint.pformat(r.get('return')) From ea0d2c2e221788e7be22ddc08a4addd660317d6c Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 2 Mar 2017 09:25:42 -0700 Subject: [PATCH 12/18] Fix ntpd ntp:ntp check https://github.com/hubblestack/nova/pull/307 --- .../cis/amazon-level-1-scored-v1-0-0.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hubblestack_nova_profiles/cis/amazon-level-1-scored-v1-0-0.yaml b/hubblestack_nova_profiles/cis/amazon-level-1-scored-v1-0-0.yaml index 038e5b8..f674a67 100644 --- a/hubblestack_nova_profiles/cis/amazon-level-1-scored-v1-0-0.yaml +++ b/hubblestack_nova_profiles/cis/amazon-level-1-scored-v1-0-0.yaml @@ -590,7 +590,7 @@ grep: - '/etc/ntp.conf': tag: 'CIS-3.6' pattern: '^server' - - '/etc/sysconfig/ntpd': + - '/etc/init.d/ntpd': tag: 'CIS-3.6' pattern: 'ntp:ntp' description: 'Configure Network Time Protocol (NTP) (Scored)' @@ -665,7 +665,7 @@ grep: tag: 'CIS-6.2.8' pattern: "^PermitRootLogin" match_output: "PermitRootLogin no" - description: 'Set SSH HostbasedAuthentication to No (Scored)' + description: 'Set SSH PermitRootLogin to No (Scored)' sshd_permit_empty_passwords: data: From da0c1f78f632d751cff9a6828794bf931d49d1e6 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 2 Mar 2017 09:30:40 -0700 Subject: [PATCH 13/18] Remove cve scan from default top.nova https://github.com/hubblestack/hubblestack_data/pull/6 --- hubblestack_nova_profiles/top.nova | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hubblestack_nova_profiles/top.nova b/hubblestack_nova_profiles/top.nova index 6f1963a..167482d 100644 --- a/hubblestack_nova_profiles/top.nova +++ b/hubblestack_nova_profiles/top.nova @@ -3,8 +3,6 @@ # Subscribes 
to CIS, cve_scan, and misc.yaml for miscellaneous checks nova: - 'G@kernel:Linux and not G@osfinger:*CoreOS*': - - cve.scan-v2 'G@osfinger:*CoreOS*': - cis.coreos-level-1 'G@osfinger:CentOS-6': @@ -33,3 +31,5 @@ nova: - cis.amazon-level-1-scored-v1-0-0 #'*': # - misc + #'G@kernel:Linux and not G@osfinger:*CoreOS*': + # - cve.scan-v2 From 92714cbf1e24167ca66341ac1c22f180b449aa50 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 2 Mar 2017 15:08:45 -0700 Subject: [PATCH 14/18] Move output formatting code to hubble.py https://github.com/hubblestack/nova/pull/305 --- _modules/hubble.py | 158 ++++++++++++++++++++++--------- hubblestack_nova/command.py | 64 +------------ hubblestack_nova/cve_scan.py | 2 +- hubblestack_nova/cve_scan_v2.py | 37 ++++---- hubblestack_nova/firewall.py | 64 +------------ hubblestack_nova/grep.py | 64 +------------ hubblestack_nova/misc.py | 64 +------------ hubblestack_nova/netstat.py | 34 ++++--- hubblestack_nova/openssl.py | 64 +------------ hubblestack_nova/pkg.py | 64 +------------ hubblestack_nova/pkgng_audit.py | 5 +- hubblestack_nova/service.py | 64 +------------ hubblestack_nova/stat.py | 64 +------------ hubblestack_nova/sysctl.py | 64 +------------ hubblestack_nova/win_auditpol.py | 64 +------------ hubblestack_nova/win_firewall.py | 64 +------------ hubblestack_nova/win_gp.py | 64 +------------ hubblestack_nova/win_pkg.py | 64 +------------ hubblestack_nova/win_reg.py | 65 +------------ hubblestack_nova/win_secedit.py | 64 +------------ 20 files changed, 176 insertions(+), 1021 deletions(-) diff --git a/_modules/hubble.py b/_modules/hubble.py index 4335739..989811d 100644 --- a/_modules/hubble.py +++ b/_modules/hubble.py @@ -83,9 +83,7 @@ def audit(configs=None, `hubblestack:nova:show_compliance` in minion config/pillar. show_profile - Whether to add the profile path to the verbose output for audits. - Defaults to True. Configurable via `hubblestack:nova:show_profile` - in minion config/pillar. 
+ DEPRECATED called_from_top Ignore this argument. It is used for distinguishing between user-calls @@ -107,9 +105,7 @@ def audit(configs=None, if configs is None: return top(verbose=verbose, show_success=show_success, - show_compliance=show_compliance, - show_profile=show_profile, - debug=debug) + show_compliance=show_compliance) if __salt__['config.get']('hubblestack:nova:autoload', True): load() @@ -122,8 +118,10 @@ def audit(configs=None, show_success = __salt__['config.get']('hubblestack:nova:show_success', True) if show_compliance is None: show_compliance = __salt__['config.get']('hubblestack:nova:show_compliance', True) - if show_profile is None: - show_profile = __salt__['config.get']('hubblestack:nova:show_profile', True) + if show_profile is not None: + log.warning( + 'Keyword argument \'show_profile\' is no longer supported' + ) if debug is None: debug = __salt__['config.get']('hubblestack:nova:debug', False) @@ -135,6 +133,95 @@ def audit(configs=None, configs = [os.path.join(os.path.sep, os.path.join(*(con.split('.yaml')[0]).split('.'))) for con in configs] + ret = _run_audit(configs, tags, debug=debug) + + terse_results = {} + verbose_results = {} + + # Pull out just the tag and description + terse_results['Failure'] = [] + tags_descriptions = set() + + for tag_data in ret.get('Failure', []): + tag = tag_data['tag'] + description = tag_data.get('description') + if (tag, description) not in tags_descriptions: + terse_results['Failure'].append({tag: description}) + tags_descriptions.add((tag, description)) + + terse_results['Success'] = [] + tags_descriptions = set() + + for tag_data in ret.get('Success', []): + tag = tag_data['tag'] + description = tag_data.get('description') + if (tag, description) not in tags_descriptions: + terse_results['Success'].append({tag: description}) + tags_descriptions.add((tag, description)) + + terse_results['Controlled'] = [] + control_reasons = set() + + for tag_data in ret.get('Controlled', []): + tag = tag_data['tag'] 
+ control_reason = tag_data.get('control', '') + description = tag_data.get('description') + if (tag, description, control_reason) not in control_reasons: + terse_results['Controlled'].append({tag: control_reason}) + control_reasons.add((tag, description, control_reason)) + + # Calculate compliance level + if show_compliance: + compliance = _calculate_compliance(terse_results) + else: + compliance = False + + if not show_success and 'Success' in terse_results: + terse_results.pop('Success') + + if not terse_results['Controlled']: + terse_results.pop('Controlled') + + # Format verbose output as single-key dictionaries with tag as key + if verbose: + verbose_results['Failure'] = [] + + for tag_data in ret.get('Failure', []): + tag = tag_data['tag'] + verbose_results['Failure'].append({tag: tag_data}) + + verbose_results['Success'] = [] + + for tag_data in ret.get('Success', []): + tag = tag_data['tag'] + verbose_results['Success'].append({tag: tag_data}) + + if not show_success and 'Success' in verbose_results: + verbose_results.pop('Success') + + verbose_results['Controlled'] = [] + + for tag_data in ret.get('Controlled', []): + tag = tag_data['tag'] + verbose_results['Controlled'].append({tag: tag_data}) + + if not verbose_results['Controlled']: + verbose_results.pop('Controlled') + + results = verbose_results + else: + results = terse_results + + if compliance: + results['Compliance'] = compliance + + if not called_from_top and not results: + results['Messages'] = 'No audits matched this host in the specified profiles.' 
+ + return results + +def _run_audit(configs, tags, debug): + results = {} # Compile a list of audit data sets which we need to run @@ -174,11 +261,7 @@ def audit(configs=None, # We can revisit if this ever becomes a big bottleneck for key, func in __nova__._dict.iteritems(): try: - ret = func(data_list, - tags, - verbose=verbose, - show_profile=show_profile, - debug=debug) + ret = func(data_list, tags, debug=debug) except Exception as exc: log.error('Exception occurred in nova module:') log.error(traceback.format_exc()) @@ -222,42 +305,25 @@ def audit(configs=None, # Look through the failed results to find audits which match our control config failures_to_remove = [] for i, failure in enumerate(results.get('Failure', [])): - if isinstance(failure, str): - if failure in processed_controls: - failures_to_remove.append(i) - if 'Controlled' not in results: - results['Controlled'] = [] - results['Controlled'].append( - {failure: processed_controls[failure].get('reason')}) - else: # dict - for failure_tag in failure: - if failure_tag in processed_controls: - failures_to_remove.append(i) - if 'Controlled' not in results: - results['Controlled'] = [] - results['Controlled'].append( - {failure_tag: processed_controls[failure_tag].get('reason')}) + failure_tag = failure['tag'] + if failure_tag in processed_controls: + failures_to_remove.append(i) + if 'Controlled' not in results: + results['Controlled'] = [] + failure.update({ + 'control': processed_controls[failure_tag].get('reason') + }) + results['Controlled'].append(failure) # Remove controlled failures from results['Failure'] if failures_to_remove: for failure_index in reversed(sorted(set(failures_to_remove))): results['Failure'].pop(failure_index) - if show_compliance: - compliance = _calculate_compliance(results) - if compliance: - results['Compliance'] = compliance - for key in results.keys(): if not results[key]: results.pop(key) - if not called_from_top and not results: - results['Messages'] = 'No audits matched 
this host in the specified profiles.' - - if not show_success and 'Success' in results: - results.pop('Success') - return results @@ -321,9 +387,7 @@ def top(topfile='top.nova', `hubblestack:nova:show_compliance` in minion config/pillar. show_profile - Whether to add the profile path to the verbose output for audits. - Defaults to True. Configurable via `hubblestack:nova:show_profile` - in minion config/pillar. + DEPRECATED debug Whether to log additional information to help debug nova. Defaults to @@ -349,8 +413,10 @@ def top(topfile='top.nova', show_success = __salt__['config.get']('hubblestack:nova:show_success', True) if show_compliance is None: show_compliance = __salt__['config.get']('hubblestack:nova:show_compliance', True) - if show_profile is None: - show_profile = __salt__['config.get']('hubblestack:nova:show_profile', True) + if show_profile is not None: + log.warning( + 'Keyword argument \'show_profile\' is no longer supported' + ) if debug is None: debug = __salt__['config.get']('hubblestack:nova:debug', False) @@ -388,9 +454,7 @@ def top(topfile='top.nova', verbose=verbose, show_success=True, show_compliance=False, - show_profile=show_profile, - called_from_top=True, - debug=debug) + called_from_top=True) # Merge in the results for key, val in ret.iteritems(): diff --git a/hubblestack_nova/command.py b/hubblestack_nova/command.py index b52f7f2..70fd6b8 100644 --- a/hubblestack_nova/command.py +++ b/hubblestack_nova/command.py @@ -87,16 +87,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the command audits contained in the data_list ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -170,63 +167,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, 
debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/cve_scan.py b/hubblestack_nova/cve_scan.py index 16cb6df..cf5bc91 100644 --- a/hubblestack_nova/cve_scan.py +++ b/hubblestack_nova/cve_scan.py @@ -21,7 +21,7 @@ def __virtual__(): return False, 'This module requires Linux and the oscap binary' -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, 
tags, debug=False): ''' Run the network.netstat command ''' diff --git a/hubblestack_nova/cve_scan_v2.py b/hubblestack_nova/cve_scan_v2.py index 6e0ac7c..76d74a8 100644 --- a/hubblestack_nova/cve_scan_v2.py +++ b/hubblestack_nova/cve_scan_v2.py @@ -92,7 +92,7 @@ def __virtual__(): return not salt.utils.is_windows() -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Main audit function. See module docstring for more information on usage. ''' @@ -224,9 +224,9 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): vulnerable = affected_obj if vulnerable: if vulnerable.score < min_score: - ret['Controlled'].append(vulnerable.get_report(verbose, show_profile, profile)) + ret['Controlled'].append(vulnerable.get_report(profile)) else: - ret['Failure'].append(vulnerable.get_report(verbose, show_profile, profile)) + ret['Failure'].append(vulnerable.get_report(profile)) if tags != '*': log.debug("tags: %s", tags) @@ -379,25 +379,20 @@ def __init__(self, title, pkg, pkg_version, score, operator, reporter, href, cve self.oudated_version = None - def get_report(self, verbose, show_profile, profile): + def get_report(self, profile): ''' Return the dictionary of what should be reported in failures, based on verbose. 
''' - uid = self.pkg + '-' + self.pkg_version - if verbose: - report = { - 'href': self.href, - 'affected_version': self.pkg_version, - 'reporter': self.reporter, - 'score': self.score, - 'cve_list': self.cve_list, - 'affected_pkg': self.pkg, - 'local_version': self.oudated_version, - 'description': self.title - } - if show_profile: - report['nova_profile'] = profile - else: - report = self.title - return {uid: report} + return { + 'tag': self.pkg + '-' + self.pkg_version, + 'href': self.href, + 'affected_version': self.pkg_version, + 'reporter': self.reporter, + 'score': self.score, + 'cve_list': self.cve_list, + 'affected_pkg': self.pkg, + 'local_version': self.oudated_version, + 'description': self.title, + 'nova_profile': profile + } diff --git a/hubblestack_nova/firewall.py b/hubblestack_nova/firewall.py index 5aa9799..618b707 100644 --- a/hubblestack_nova/firewall.py +++ b/hubblestack_nova/firewall.py @@ -99,13 +99,10 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -163,63 +160,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in 
tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/grep.py b/hubblestack_nova/grep.py index 9c2f6e8..243a1a0 100644 --- a/hubblestack_nova/grep.py +++ b/hubblestack_nova/grep.py @@ -73,16 +73,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the grep audits contained in the YAML files processed by __virtual__ ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -148,63 +145,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - 
description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/misc.py b/hubblestack_nova/misc.py index ea7c5cd..50829b5 100644 --- a/hubblestack_nova/misc.py +++ b/hubblestack_nova/misc.py @@ -59,16 +59,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the misc audits contained in the data_list ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -108,63 +105,6 @@ def 
audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/netstat.py b/hubblestack_nova/netstat.py index b9ec1f9..88ad883 100644 --- a/hubblestack_nova/netstat.py +++ b/hubblestack_nova/netstat.py @@ -36,7 +36,7 @@ def __virtual__(): return False, 'No network.netstat function found' -def audit(data_list, tags, verbose=False, show_profile=False, 
debug=True): +def audit(data_list, tags, debug=True): ''' Run the network.netstat command ''' @@ -49,8 +49,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=True): if 'address' in check_args: tag_args = copy.deepcopy(check_args) tag_args['id'] = check - if show_profile: - tag_args['nova_profile'] = profile + tag_args['nova_profile'] = profile if isinstance(check_args['address'], list): for address in check_args['address']: __tags__[address] = tag_args @@ -62,25 +61,24 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=True): return ret for address_data in __salt__['network.netstat'](): - address = address_data['local-address'] + success = False for whitelisted_address in __tags__: - if fnmatch.fnmatch(address, whitelisted_address): - success_data = {address: __tags__[whitelisted_address]['id']} - if verbose: - success_data = {address: __tags__[whitelisted_address]} - success_data[address].update(address_data) - success_data[address]['description'] = __tags__[whitelisted_address]['id'] - ret['Success'].append(success_data) + if fnmatch.fnmatch(address_data['local-address'], whitelisted_address): + address_data.update({ + 'tag': __tags__[whitelisted_address]['address'][0], + 'description': __tags__[whitelisted_address]['id'], + 'nova_profile': __tags__[whitelisted_address]['nova_profile'] + }) + ret['Success'].append(address_data) success = True break if success is False: - failure_data = {address: address_data['program']} - if verbose: - failure_data = {address: {'program': address_data['program']}} - failure_data[address].update(address_data) - failure_data[address]['description'] = address_data['program'] - failure_data[address]['nova_profile'] = 'netstat' - ret['Failure'].append(failure_data) + address_data.update({ + 'tag': address_data['local-address'], + 'description': address_data['program'], + 'nova_profile': 'netstat' + }) + ret['Failure'].append(address_data) return ret diff --git a/hubblestack_nova/openssl.py 
b/hubblestack_nova/openssl.py index 2515c6d..953951a 100644 --- a/hubblestack_nova/openssl.py +++ b/hubblestack_nova/openssl.py @@ -99,13 +99,10 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=True): +def audit(data_list, tags, debug=True): __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -154,63 +151,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=True): tag_data['reason'] = failing_reason ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - description = tag_data.get('description') - control_reason = tag_data.get('control', '') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - 
- for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/pkg.py b/hubblestack_nova/pkg.py index 436ec7e..c6cf198 100644 --- a/hubblestack_nova/pkg.py +++ b/hubblestack_nova/pkg.py @@ -79,16 +79,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the pkg audits contained in the YAML files processed by __virtual__ ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -158,63 +155,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: 
tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/pkgng_audit.py b/hubblestack_nova/pkgng_audit.py index 8e40adc..ebc212a 100644 --- a/hubblestack_nova/pkgng_audit.py +++ b/hubblestack_nova/pkgng_audit.py @@ -20,7 +20,7 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the pkg.audit command ''' @@ -42,8 +42,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): salt_ret = __salt__['pkg.audit']() results = {'pkgng_audit': {'result': salt_ret}} - if show_profile: - results['pkng_audit']['nova_profile'] = profile + results['pkng_audit']['nova_profile'] = profile if not verbose: results = salt_ret if '0 problem(s)' not in salt_ret: diff --git a/hubblestack_nova/service.py b/hubblestack_nova/service.py index 70a7590..aaa20f5 100644 --- a/hubblestack_nova/service.py +++ b/hubblestack_nova/service.py @@ -72,16 +72,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the service audits contained in the YAML files processed by __virtual__ ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -114,63 
+111,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/stat.py b/hubblestack_nova/stat.py index efdc3b6..74e6e58 100644 --- a/hubblestack_nova/stat.py +++ b/hubblestack_nova/stat.py @@ -57,16 +57,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def 
audit(data_list, tags, debug=False): ''' Run the stat audits contained in the YAML files processed by __virtual__ ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -118,63 +115,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - description = tag_data.get('description') - control_reason = tag_data.get('control', '') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not 
ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/sysctl.py b/hubblestack_nova/sysctl.py index 662ae6b..611975a 100644 --- a/hubblestack_nova/sysctl.py +++ b/hubblestack_nova/sysctl.py @@ -49,16 +49,13 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Run the sysctl audits contained in the YAML files processed by __virtual__ ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: @@ -91,63 +88,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in control_reasons: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: tag_dict}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = 
tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/win_auditpol.py b/hubblestack_nova/win_auditpol.py index 8868896..976fb16 100644 --- a/hubblestack_nova/win_auditpol.py +++ b/hubblestack_nova/win_auditpol.py @@ -25,7 +25,7 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Runs auditpol on the local machine and audits the return data with the CIS yaml processed by __virtual__ @@ -33,10 +33,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): __data__ = {} __auditdata__ = _auditpol_import() for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: log.debug('auditpol audit __data__:') @@ -75,63 +72,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): log.debug('When trying to audit the advanced auditpol section,' ' the yaml contained incorrect data for the key') - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not 
in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in tags_descriptions: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: description}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/win_firewall.py b/hubblestack_nova/win_firewall.py index e432b20..dac1e48 100644 --- a/hubblestack_nova/win_firewall.py +++ b/hubblestack_nova/win_firewall.py @@ -25,7 +25,7 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Runs auditpol on the local machine and audits the return data with the CIS yaml processed by __virtual__ @@ -33,10 +33,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): __data__ = {} __firewalldata__ = _import_firewall() for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: log.debug('firewall audit __data__:') @@ -76,63 +73,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): 
log.debug('When trying to audit the firewall section,' ' the yaml contained incorrect data for the key') - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in tags_descriptions: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: description}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/win_gp.py b/hubblestack_nova/win_gp.py index d84cb9b..c011310 100644 --- a/hubblestack_nova/win_gp.py +++ b/hubblestack_nova/win_gp.py @@ -25,7 +25,7 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, 
debug=False): ''' Runs auditpol on the local machine and audits the return data with the CIS yaml processed by __virtual__ @@ -33,10 +33,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): __data__ = {} __gpdata__ = _get_gp_templates() for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: log.debug('firewall audit __data__:') @@ -75,63 +72,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): log.debug('When trying to audit the firewall section,' ' the yaml contained incorrect data for the key') - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in tags_descriptions: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: description}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - 
success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/win_pkg.py b/hubblestack_nova/win_pkg.py index a14ef04..fe06efb 100644 --- a/hubblestack_nova/win_pkg.py +++ b/hubblestack_nova/win_pkg.py @@ -25,7 +25,7 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Runs auditpol on the local machine and audits the return data with the CIS yaml processed by __virtual__ @@ -37,10 +37,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): __salt__['pkg.refresh_db']() __pkgdata__ = __salt__['pkg.list_pkgs']() for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: log.debug('package audit __data__:') @@ -78,63 +75,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = 
tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in tags_descriptions: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: description}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/win_reg.py b/hubblestack_nova/win_reg.py index 79bc95c..0a0bcdc 100644 --- a/hubblestack_nova/win_reg.py +++ b/hubblestack_nova/win_reg.py @@ -24,17 +24,14 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Runs auditpol on the local machine and audits the return data with the CIS yaml processed by __virtual__ ''' __data__ = {} for profile, data in data_list: - if show_profile: - _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: log.debug('registry audit __data__:') @@ -89,64 +86,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): tag_data['value_found'] = None ret['Failure'].append(tag_data) - - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, 
description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in tags_descriptions: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: description}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret diff --git a/hubblestack_nova/win_secedit.py b/hubblestack_nova/win_secedit.py index 97c5bca..bcb4b70 100644 --- a/hubblestack_nova/win_secedit.py +++ b/hubblestack_nova/win_secedit.py @@ -30,7 +30,7 @@ def __virtual__(): return True -def audit(data_list, tags, verbose=False, show_profile=False, debug=False): +def audit(data_list, tags, debug=False): ''' Runs secedit on the local machine and audits the return data with the CIS yaml processed by __virtual__ @@ -39,10 +39,7 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): __secdata__ = _secedit_export() __sidaccounts__ = _get_account_sid() for profile, data in data_list: - if show_profile: 
- _merge_yaml(__data__, data, profile) - else: - _merge_yaml(__data__, data) + _merge_yaml(__data__, data, profile) __tags__ = _get_tags(__data__) if debug: log.debug('secedit audit __data__:') @@ -95,63 +92,6 @@ def audit(data_list, tags, verbose=False, show_profile=False, debug=False): else: ret['Failure'].append(tag_data) - failure = [] - success = [] - controlled = [] - - if not verbose: - # Pull out just the tag and description - tags_descriptions = set() - - for tag_data in ret['Failure']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - failure.append({tag: description}) - tags_descriptions.add((tag, description)) - - tags_descriptions = set() - - for tag_data in ret['Success']: - tag = tag_data['tag'] - description = tag_data.get('description') - if (tag, description) not in tags_descriptions: - success.append({tag: description}) - tags_descriptions.add((tag, description)) - - control_reasons = set() - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - control_reason = tag_data.get('control', '') - description = tag_data.get('description') - if (tag, description, control_reason) not in tags_descriptions: - tag_dict = {'description': description, - 'control': control_reason} - controlled.append({tag: description}) - control_reasons.add((tag, description, control_reason)) - - else: - # Format verbose output as single-key dictionaries with tag as key - for tag_data in ret['Failure']: - tag = tag_data['tag'] - failure.append({tag: tag_data}) - - for tag_data in ret['Success']: - tag = tag_data['tag'] - success.append({tag: tag_data}) - - for tag_data in ret['Controlled']: - tag = tag_data['tag'] - controlled.append({tag: tag_data}) - - ret['Controlled'] = controlled - ret['Success'] = success - ret['Failure'] = failure - - if not ret['Controlled']: - ret.pop('Controlled') - return ret From 7462e5f638b0cd12f90436edd6e38679f5895783 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: 
Thu, 2 Mar 2017 15:41:07 -0700 Subject: [PATCH 15/18] Add provisional ubuntu 16.04 profile --- .../ubuntu-1604-level-1-scored-v1-0-0.yaml | 1024 +++++++++++++++++ hubblestack_nova_profiles/top.nova | 2 + 2 files changed, 1026 insertions(+) create mode 100644 hubblestack_nova_profiles/cis/ubuntu-1604-level-1-scored-v1-0-0.yaml diff --git a/hubblestack_nova_profiles/cis/ubuntu-1604-level-1-scored-v1-0-0.yaml b/hubblestack_nova_profiles/cis/ubuntu-1604-level-1-scored-v1-0-0.yaml new file mode 100644 index 0000000..36ba734 --- /dev/null +++ b/hubblestack_nova_profiles/cis/ubuntu-1604-level-1-scored-v1-0-0.yaml @@ -0,0 +1,1024 @@ +# This is mostly a copy from the 14.04 profile, until we have time to do +# a proper profile + +grep: + whitelist: + fstab_dev_shm_partition_nodev: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: nodev + pattern: /dev/shm + tag: CIS-2.14 + description: Add nodev Option to /run/shm Partition + fstab_dev_shm_partition_noexec: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: noexec + pattern: /dev/shm + tag: CIS-2.16 + description: Add noexec Option to /run/shm Partition + fstab_dev_shm_partition_nosuid: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: nosuid + pattern: /dev/shm + tag: CIS-2.15 + description: Add nosuid Option to /run/shm Partition + fstab_home_partition: + data: + Ubuntu-16.04: + - /etc/fstab: + pattern: /home + tag: CIS-2.9 + description: Create Separate Partition for /home + fstab_home_partition_nodev: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: nodev + pattern: /home + tag: CIS-2.10 + description: Add nodev Option to /home + fstab_tmp_partition: + data: + Ubuntu-16.04: + - /etc/fstab: + pattern: /tmp + tag: CIS-2.1 + description: Create Separate Partition for /tmp + fstab_tmp_partition_nodev: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: nodev + pattern: /tmp + tag: CIS-2.2 + description: Set nodev option for /tmp Partition + fstab_tmp_partition_noexec: + data: + Ubuntu-16.04: + - 
/etc/fstab: + match_output: noexec + pattern: /tmp + tag: CIS-2.4 + description: Set noexec option for /tmp Partition + fstab_tmp_partition_nosuid: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: nosuid + pattern: /tmp + tag: CIS-2.3 + description: Set nosuid option for /tmp Partition + fstab_var_log_audit_partition: + data: + Ubuntu-16.04: + - /etc/fstab: + pattern: /var/log/audit + tag: CIS-2.8 + description: Create Separate Partition for /var/log/audit + fstab_var_log_partition: + data: + Ubuntu-16.04: + - /etc/fstab: + pattern: /var/log + tag: CIS-2.7 + description: Create Separate Partition for /var/log + fstab_var_partition: + data: + Ubuntu-16.04: + - /etc/fstab: + pattern: /var + tag: CIS-2.5 + description: Create Separate Partition for /var + fstab_var_tmp_bind_mount: + data: + Ubuntu-16.04: + - /etc/fstab: + match_output: /var/tmp + pattern: /var + tag: CIS-2.6 + description: Bind Mount the /var/tmp directory to /tmp + grub_password: + data: + Ubuntu-16.04: + - /boot/grub/grub.cfg: + pattern: password + tag: CIS-3.3 + description: Set Boot Loader Password + core_hard_limit: + data: + Ubuntu-16.04: + - /etc/security/limits.conf: + match_output: '0' + pattern: hard core + tag: CIS-4.1 + description: Restrict Core Dumps + ntp_restrict_default: + data: + Ubuntu-16.04: + - /etc/ntp.conf: + pattern: '^restrict' + match_output: default + tag: CIS-6.5 + - /etc/ntp.conf: + pattern: restrict -6 default + tag: CIS-6.5 + - /etc/ntp.conf: + pattern: '^server' + tag: CIS-6.5 + - /etc/init.d/ntp: + pattern: RUNASUSER= + tag: CIS-6.5 + description: Configure Network Time Protocol (NTP) + local_mta: + data: + Ubuntu-16.04: + - /etc/postfix/main.cf: + pattern: '^inet_interfaces' + match_output: localhost + tag: CIS-6.15 + description: Configure Mail Transfer Agent for Local-Only Mode + rsync: + data: + Ubuntu-16.04: + - /etc/default/rsync: + pattern: ^RSYNC_ENABLE + match_output: 'false' + tag: CIS-6.16 + description: Ensure rsync service is not enabled + 
rsyslog_file_perms: + data: + Ubuntu-16.04: + - /etc/rsyslog.conf: + pattern: '^\$FileCreateMode' + match_output: '0640' + tag: CIS-8.2.4 + description: Create and Set Permissions on rsyslog Log Files + rsyslog_remote_logging: + data: + Ubuntu-16.04: + - /etc/rsyslog.conf: + pattern: ^*.*[^I][^I]*@ + tag: CIS-8.2.5 + description: Configure rsyslog to Send Logs to a Remote Log Host + pam_cracklib_settings: + data: + Ubuntu-16.04: + - /etc/pam.d/common-password: + pattern: pam_cracklib + match_output: 'retry=3' + tag: CIS-9.2.1 + - /etc/pam.d/common-password: + pattern: pam_cracklib + match_output: 'minlen=14' + tag: CIS-9.2.1 + - /etc/pam.d/common-password: + pattern: pam_cracklib + match_output: 'dcredit=-1' + tag: CIS-9.2.1 + - /etc/pam.d/common-password: + pattern: pam_cracklib + match_output: 'ucredit=-1' + tag: CIS-9.2.1 + - /etc/pam.d/common-password: + pattern: pam_cracklib + match_output: 'ocredit=-1' + tag: CIS-9.2.1 + - /etc/pam.d/common-password: + pattern: pam_cracklib + match_output: 'lcredit=-1' + tag: CIS-9.2.1 + description: Set Password Creation Requirement Parameters Using pam_cracklib + pam_password_reuse: + data: + Ubuntu-16.04: + - /etc/pam.d/common-password: + pattern: remember + match_output: 'remember=5' + tag: CIS-9.2.3 + description: Limit Password Reuse + ssh_version_2: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: Protocol + match_output: '2' + tag: CIS-9.3.1 + description: Set SSH Protocol to 2 + ssh_log_level: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: LogLevel + match_output: INFO + tag: CIS-9.3.2 + description: Set LogLevel to INFO + ssh_disable_xforward: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: X11Forwarding + match: 'no' + tag: CIS-9.3.4 + description: Disable SSH X11 Forwarding + ssh_auth_retries: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: MaxAuthTries + match: '4' + tag: CIS-9.3.5 + description: Set SSH MaxAuthTries to 4 or Less + ssh_ignore_rhosts: + data: + 
Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: IgnoreRhosts + match: 'yes' + tag: CIS-9.3.6 + description: Set SSH IgnoreRhosts to Yes + ssh_hostbased_auth: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: HostbasedAuthentication + match: 'no' + tag: CIS-9.3.7 + description: Set SSH HostbasedAuthentication to No + ssh_permit_root: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: PermitRootLogin + match: 'no' + tag: CIS-9.3.8 + description: Disable SSH Root Login + ssh_permit_empty_pw: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: PermitEmptyPasswords + match: 'no' + tag: CIS-9.3.9 + description: Set SSH PermitEmptyPasswords to No + ssh_permit_user_env: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: PermitUserEnvironment + match: 'no' + tag: CIS-9.3.10 + description: Do Not Allow Users to Set Environment Options + ssh_restrict_cipher: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: Ciphers + match: 'aes128-ctr,aes192-ctr,aes256-ctr' + tag: CIS-9.3.11 + description: Use Only Approved Cipher in Counter Mode + ssh_idle_timeout: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: ClientAliveInterval + match: '300' + tag: CIS-9.3.12 + - /etc/ssh/sshd_config: + pattern: ClientAliveCountMax + match: 0 + tag: CIS-9.3.12 + description: Set Idle Timeout Interval for User Login + ssh_limit_access: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: AllowUsers + tag: CIS-9.3.13 + - /etc/ssh/sshd_config: + pattern: AllowGroups + tag: CIS-9.3.13 + - /etc/ssh/sshd_config: + pattern: DenyUsers + tag: CIS-9.3.13 + - /etc/ssh/sshd_config: + pattern: DenyGroups + tag: CIS-9.3.13 + description: Limit Access via SSH + ssh_set_banner: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + pattern: Banner + match: issue + tag: CIS-9.3.14 + description: Set SSH Banner + limit_su_access: + data: + Ubuntu-16.04: + - /etc/pam.d/su: + pattern: pam_wheel.so + match_output: use_uid + tag: CIS-9.5 + - /etc/group: + 
pattern: wheel + tag: CIS-9.5 + description: Restrict Access to the su Command + password_max_days: + data: + Ubuntu-16.04: + - /etc/login.defs: + pattern: PASS_MAX_DAYS + match_output: '90' + tag: CIS-10.1.1 + description: Set Password Expiration Days + password_min_days: + data: + Ubuntu-16.04: + - /etc/login.defs: + pattern: PASS_MIN_DAYS + match_output: '7' + tag: CIS-10.1.2 + description: Set Password Change Minimum Number of Days + password_warn_days: + data: + Ubuntu-16.04: + - /etc/login.defs: + pattern: PASS_WARN_AGE + match_output: '7' + tag: CIS-10.1.3 + description: Set Password Expiring Warning Days + umask: + data: + Ubuntu-16.04: + - /etc/login.defs: + pattern: UMASK + match_output: '077' + tag: CIS-10.4 + description: Set Default umask for Users + blacklist: + root_passwd_set: + data: + Ubuntu-16.04: + - /etc/passwd: + pattern: '^root:[*\!]:' + tag: CIS-3.4 + description: Require Authentication for Single-User Mode + rsh_inet: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^shell' + tag: CIS-5.1.2 + - /etc/inetd.conf: + pattern: '^login' + tag: CIS-5.1.2 + - /etc/inetd.conf: + pattern: '^exec' + tag: CIS-5.1.2 + description: Ensure rsh server is not enabled + talk: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^talk' + tag: CIS-5.1.4 + - /etc/inetd.conf: + pattern: '^ntalk' + tag: CIS-5.1.4 + description: Ensure talk server is not enabled + telnet: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^telnet' + tag: CIS-5.1.6 + description: Ensure Telnet Server is not Enabled + tftp: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^tftp' + tag: CIS-5.1.7 + description: Ensure tftp-server is not enabled + chargen: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^chargen' + tag: CIS-5.2 + description: Ensure Chargen is not Enabled + daytime: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^daytime' + tag: CIS-5.3 + description: Ensure daytime is not Enabled + echo: + data: + Ubuntu-16.04: + - 
/etc/inetd.conf: + pattern: '^echo' + tag: CIS-5.4 + description: Ensure echo is not Enabled + discard: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^discard' + tag: CIS-5.5 + description: Ensure discard is not Enabled + time: + data: + Ubuntu-16.04: + - /etc/inetd.conf: + pattern: '^time' + tag: CIS-5.6 + description: Ensure time is not Enabled + banner_os_info_motd: + data: + Ubuntu-16.04: + - /etc/motd: + pattern: '\v' + tag: CIS-11.2 + - /etc/motd: + pattern: '\r' + tag: CIS-11.2 + - /etc/motd: + pattern: '\m' + tag: CIS-11.2 + - /etc/motd: + pattern: '\s' + tag: CIS-11.2 + description: Remove OS Information from Login Warning Banners + banner_os_info_issue: + data: + Ubuntu-16.04: + - /etc/issue: + pattern: '\v' + tag: CIS-11.2 + - /etc/issue: + pattern: '\r' + tag: CIS-11.2 + - /etc/issue: + pattern: '\m' + tag: CIS-11.2 + - /etc/issue: + pattern: '\s' + tag: CIS-11.2 + description: Remove OS Information from Login Warning Banners + banner_os_info_issue_net: + data: + Ubuntu-16.04: + - /etc/issue.net: + pattern: '\v' + tag: CIS-11.2 + - /etc/issue.net: + pattern: '\r' + tag: CIS-11.2 + - /etc/issue.net: + pattern: '\m' + tag: CIS-11.2 + - /etc/issue.net: + pattern: '\s' + tag: CIS-11.2 + description: Remove OS Information from Login Warning Banners + legacy_entries_passwd: + data: + Ubuntu-16.04: + - /etc/passwd: + pattern: '^+' + tag: CIS-13.2 + description: Verify No Legacy "+" Entries Exist in /etc/passwd File + legacy_entries_shadow: + data: + Ubuntu-16.04: + - /etc/shadow: + pattern: '^+' + tag: CIS-13.3 + description: Verify No Legacy "+" Entries Exist in /etc/shadow File + legacy_entries_group: + data: + Ubuntu-16.04: + - /etc/group: + pattern: '^+' + tag: CIS-13.4 + description: Verify No Legacy "+" Entries Exist in /etc/group File + +service: + blacklist: + autofs: + data: + Ubuntu-16.04: + - autofs: CIS-2.25 + description: Disable Automounting + apport: + data: + Ubuntu-16.04: + - apport: CIS-4.1 + description: Restrict Core Dumps + 
whoopsie: + data: + Ubuntu-16.04: + - whoopsie: CIS-4.1 + description: Restrict Core Dumps + xinetd: + data: + Ubuntu-16.04: + - xinetd: CIS-5.1.8 + description: Ensure xinetd is not enabled + avahi_daemon: + data: + Ubuntu-16.04: + - avahi-daemon: CIS-6.2 + description: Ensure Avahi Server is not enabled + cups: + data: + Ubuntu-16.04: + - cups: CIS-6.3 + description: Ensure print server is not enabled + dhcp-server: + data: + Ubuntu-16.04: + - isc-dhcp-server: CIS-6.4 + description: Ensure DHCP Server is not enabled + whitelist: + rsyslog: + data: + Ubuntu-16.04: + - rsyslog: CIS-8.2.2 + description: Ensure the rsyslog Service is activated + cron: + data: + Ubuntu-16.04: + - cron: CIS-9.1.1 + description: Enable cron Daemon + +sysctl: + disable_suid_dumpable: + data: + Ubuntu-16.04: + - fs.suid_dumpable: + match_output: '0' + tag: CIS-4.1 + description: Restrict Core Dumps + randomize_va_space: + data: + Ubuntu-16.04: + - kernel.randomize_va_space: + match_output: '2' + tag: CIS-4.3 + description: Enable Randomized Virtual Memory Region Placement + disable_ip4_ip_forward: + data: + Ubuntu-16.04: + - net.ipv4.ip_forward: + match_output: '0' + tag: CIS-7.1.1 + description: Disable IP Forwarding + disable_packet_redirect: + data: + Ubuntu-16.04: + - net.ipv4.conf.all.send_redirects: + match_output: '0' + tag: CIS-7.1.2 + description: Disable Send Packet Redirects + disable_source_routed_packets: + data: + Ubuntu-16.04: + - net.ipv4.conf.all.accept_source_route: + match_output: '0' + tag: CIS-7.2.1 + description: Disable Source Routed Packet Acceptance + disable_icmp_redirect: + data: + Ubuntu-16.04: + - net.ipv4.conf.all.accept_redirects: + match_output: '0' + tag: CIS-7.2.2 + description: Disable ICMP Redirect Acceptance + disable_secure_icmp_redirect: + data: + Ubuntu-16.04: + - net.ipv4.conf.all.secure_redirects: + match_output: '0' + tag: CIS-7.2.3 + description: Disable Secure ICMP Redirect Acceptance + log_martians: + data: + Ubuntu-16.04: + - 
net.ipv4.conf.all.log_martians: + match_output: '1' + tag: CIS-7.2.4 + description: Log Suspicious Packets + ignore_broadcast: + data: + Ubuntu-16.04: + - net.ipv4.icmp_echo_ignore_broadcasts: + match_output: '1' + tag: CIS-7.2.5 + description: Enable Ignore Broadcast Requests + bogus_errors: + data: + Ubuntu-16.04: + - net.ipv4.icmp_ignore_bogus_error_responses: + match_output: '1' + tag: CIS-7.2.6 + description: Enable Bad Error Message Protection + rp_filter: + data: + Ubuntu-16.04: + - net.ipv4.conf.all.rp_filter: + match_output: '1' + tag: CIS-7.2.7 + description: Enable RFC-recommended Source Route Validation + tcp_syncookies: + data: + Ubuntu-16.04: + - net.ipv4.tcp_syncookies: + match_output: '1' + tag: CIS-7.2.8 + description: Enable TCP SYN Cookies + +pkg: + blacklist: + prelink: + data: + Ubuntu-16.04: + - prelink: CIS-4.4 + description: Disable Prelink + nis: + data: + Ubuntu-16.04: + - nis: CIS-5.1.1 + description: Ensure NIS is not installed + talk: + data: + Ubuntu-16.04: + - talk: CIS-5.1.5 + description: Ensure Talk Client is not installed + xserver: + data: + Ubuntu-16.04: + - xserver-xorg-core\*: CIS-6.1 + description: Ensure the X Window system is not installed + biosdevname: + data: + Ubuntu-16.04: + - biosdevname: CIS-6.17 + description: Ensure biosdevname is not enabled + whitelist: + ntp: + data: + Ubuntu-16.04: + - ntp: CIS-6.5 + description: Configure Network Time Protocol (NTP) + tcpd: + data: + Ubuntu-16.04: + - tcpd: CIS-7.4.1 + description: Install TCP Wrappers + rsyslog: + data: + Ubuntu-16.04: + - rsyslog: CIS-8.2.1 + description: Install the rsyslog package + +stat: + grub_cfg_owner: + data: + Ubuntu-16.04: + - /boot/grub/grub.cfg: + gid: 0 + group: root + uid: 0 + user: root + tag: CIS-3.1 + description: Set User/Group Owner on bootloader config + grub_cfg_perms: + data: + Ubuntu-16.04: + - /boot/grub/grub.cfg: + mode: 600 + tag: CIS-3.2 + description: Set Permissions on bootloader config + hosts_allow_perms: + data: + Ubuntu-16.04: + - 
/etc/hosts.allow: + mode: 644 + tag: CIS-7.4.3 + description: Verify Permissions on /etc/hosts.allow + hosts_deny_perms: + data: + Ubuntu-16.04: + - /etc/hosts.deny: + mode: 644 + tag: CIS-7.4.5 + description: Verify Permissions on /etc/hosts.deny + crontab_own_perms: + data: + Ubuntu-16.04: + - /etc/crontab: + gid: 0 + group: root + uid: 0 + user: root + mode: 600 + tag: CIS-9.1.2 + description: Set User/Group Owner and Permission on /etc/crontab + cron_hourly_own_perms: + data: + Ubuntu-16.04: + - /etc/cron.hourly: + gid: 0 + group: root + uid: 0 + user: root + mode: 600 + tag: CIS-9.1.3 + description: Set User/Group Owner and Permission on /etc/cron.hourly + cron_daily_own_perms: + data: + Ubuntu-16.04: + - /etc/cron.daily: + gid: 0 + group: root + uid: 0 + user: root + mode: 600 + tag: CIS-9.1.4 + description: Set User/Group Owner and Permission on /etc/cron.daily + cron_weekly_own_perms: + data: + Ubuntu-16.04: + - /etc/cron.weekly: + gid: 0 + group: root + uid: 0 + user: root + mode: 600 + tag: CIS-9.1.5 + description: Set User/Group Owner and Permission on /etc/cron.weekly + cron_monthly_own_perms: + data: + Ubuntu-16.04: + - /etc/cron.monthly: + gid: 0 + group: root + uid: 0 + user: root + mode: 600 + tag: CIS-9.1.6 + description: Set User/Group Owner and Permission on /etc/cron.monthly + cron_d_own_perms: + data: + Ubuntu-16.04: + - /etc/cron.d: + gid: 0 + group: root + uid: 0 + user: root + mode: 700 + tag: CIS-9.1.7 + description: Set User/Group Owner and Permission on /etc/cron.d + at_cron_allow: + data: + Ubuntu-16.04: + - /etc/cron.deny: + gid: null + group: null + mode: null + tag: CIS-9.1.8 + uid: null + user: null + - /etc/at.deny: + gid: null + group: null + mode: null + tag: CIS-9.1.8 + uid: null + user: null + - /etc/cron.allow: + gid: 0 + group: root + mode: 600 + tag: CIS-9.1.8 + uid: 0 + user: root + - /etc/at.allow: + gid: 0 + group: root + mode: 600 + tag: CIS-9.1.8 + uid: 0 + user: root + description: Restrict at/cron to authorized users + 
sshd_config: + data: + Ubuntu-16.04: + - /etc/ssh/sshd_config: + uid: 0 + gid: 0 + user: root + group: root + mode: 600 + tag: CIS-9.3.3 + description: Set Permissions on /etc/ssh/sshd_config + banner_files: + data: + Ubuntu-16.04: + - /etc/motd: + uid: 0 + gid: 0 + user: root + group: root + mode: 644 + tag: CIS-11.1 + - /etc/issue: + uid: 0 + gid: 0 + user: root + group: root + mode: 644 + tag: CIS-11.1 + - /etc/issue.net: + uid: 0 + gid: 0 + user: root + group: root + mode: 644 + tag: CIS-11.1 + description: Set Warning Banner for Standard Login Services + passwd_perms: + data: + Ubuntu-16.04: + - /etc/passwd: + mode: 644 + tag: CIS-12.1 + description: Verify Permissions on /etc/passwd + shadow_perms: + data: + Ubuntu-16.04: + - /etc/shadow: + mode: 640 + tag: CIS-12.2 + description: Verify Permissions on /etc/shadow + group_perms: + data: + Ubuntu-16.04: + - /etc/group: + mode: 644 + tag: CIS-12.3 + description: Verify Permissions on /etc/group + passwd_owner_group: + data: + Ubuntu-16.04: + - /etc/passwd: + uid: 0 + gid: 0 + user: root + group: root + tag: CIS-12.4 + description: Verify User/Group Ownership on /etc/passwd + shadow_owner_group: + data: + Ubuntu-16.04: + - /etc/shadow: + uid: 0 + gid: 42 + user: root + group: shadow + tag: CIS-12.5 + description: Verify User/Group Ownership on /etc/shadow + group_user_group: + data: + Ubuntu-16.04: + - /etc/group: + uid: 0 + gid: 0 + user: root + group: root + tag: CIS-12.6 + description: Verify User/Group Ownership on /etc/group + +command: + rsh_client: + data: + Ubuntu-16.04: + tag: CIS-5.1.3 + commands: + - 'dpkg -s rsh-client': + match_output: is not installed + - 'dpkg -s rsh-redone-client': + match_output: is not installed + description: Ensure RSH Client is not Installed + ufw_active: + data: + Ubuntu-16.04: + tag: CIS-7.7 + commands: + - 'ufw status': + match_output: 'Status: active' + description: Ensure Firewall is active + disable_system_accts: + data: + Ubuntu-16.04: + tag: CIS-10.2 + commands: + - ? 
+ > + egrep -v "^\+" /etc/passwd | awk -F: '($1!="root" && $1!="sync" + && $1!="shutdown" && $1!="halt" && $3<500 + && $7!="/usr/sbin/nologin" && $7!="/bin/false") {print}' + : + shell: /bin/bash + fail_if_matched: true + description: Disable System Accounts + default_root_group: + data: + Ubuntu-16.04: + tag: CIS-10.3 + commands: + - 'grep "^root:" /etc/passwd | cut -f4 -d:': + match_output: '0' + description: Set Default Group for root Account + inactive_users: + data: + Ubuntu-16.04: + tag: CIS-10.5 + commands: + - 'useradd -D | grep INACTIVE': + match_output: '35' + description: Lock Inactive User Accounts + empty_passwd_field: + data: + Ubuntu-16.04: + tag: CIS-13.1 + commands: + - ? + > + cat /etc/shadow | /usr/bin/awk -F: '($2 == "" ) { print $1 }' + : + shell: /bin/bash + fail_if_matched: true + description: Ensure Password Fields are Not Empty + uid_zero_only_root: + data: + Ubuntu-16.04: + tag: CIS-13.5 + commands: + - ? + > + cat /etc/passwd | /usr/bin/awk -F: '($1!="root" && $3==0) { print $1 }' + : + shell: /bin/bash + fail_if_matched: true + description: Verify No UID 0 Accounts Exist Other Than root + root_path: + data: + Ubuntu-16.04: + tag: CIS-13.6 + commands: + - ? + |- + if [ "`echo $PATH | grep :: `" != "" ]; then + echo "Empty Directory in PATH (::)" + fi + if [ "`echo $PATH | grep :$`" != "" ]; then + echo "Trailing : in PATH" + fi + p=`echo $PATH | sed -e 's/::/:/' -e 's/:$//' -e 's/:/ /g'` + set -- $p + while [ "$1" != "" ]; do + if [ "$1" = "." ]; then + echo "PATH contains ." 
+ shift + continue + fi + if [ -d $1 ]; then + dirperm=`ls -ldH $1 | cut -f1 -d" "` + if [ `echo $dirperm | cut -c6 ` != "-" ]; then + echo "Group Write permission set on directory $1" + fi + if [ `echo $dirperm | cut -c9 ` != "-" ]; then + echo "Other Write permission set on directory $1" + fi + dirown=`ls -ldH $1 | awk '{print $3}'` + if [ "$dirown" != "root" ] ; then + echo $1 is not owned by root + fi + else + echo $1 is not a directory + fi + shift + done + : + shell: /bin/bash + fail_if_matched: true + description: Ensure root PATH Integrity diff --git a/hubblestack_nova_profiles/top.nova b/hubblestack_nova_profiles/top.nova index 167482d..d35a1ba 100644 --- a/hubblestack_nova_profiles/top.nova +++ b/hubblestack_nova_profiles/top.nova @@ -19,6 +19,8 @@ nova: - cis.rhelw-7-level-1-scored-v2-1-0 'G@osfinger:Ubuntu-14.04': - cis.ubuntu-1404-level-1-scored-v1-0-0 + 'G@osfinger:Ubuntu-16.04': + - cis.ubuntu-1604-level-1-scored-v1-0-0 'G@osfullname:Microsoft*Windows*Server*2008*': - cis.windows-2008r2-level-1-scored-v3-0-0 'G@osfullname:Microsoft*Windows*Server*2012*': From 570df3c016d250e4dbda865983a89bfd32bcbb90 Mon Sep 17 00:00:00 2001 From: Paul Miller Date: Mon, 6 Mar 2017 12:59:57 -0500 Subject: [PATCH 16/18] On many of my networks, I get an explicit ICMP: dest unreach (or auth prohib) If the connect isn't timed out, it causes the returner to crash, sometimes silently. >>> try: ... requests.get('http://169.254.169.251/shoot') ... except requests.exceptions.ConnectTimeout: ... print "lol?" ... 
Traceback (most recent call last): File "", line 2, in File "/usr/lib/python2.6/site-packages/requests/api.py", line 68, in get return request('get', url, **kwargs) File "/usr/lib/python2.6/site-packages/requests/api.py", line 50, in request response = session.request(method=method, url=url, **kwargs) File "/usr/lib/python2.6/site-packages/requests/sessions.py", line 464, in request resp = self.send(prep, **send_kwargs) File "/usr/lib/python2.6/site-packages/requests/sessions.py", line 576, in send r = adapter.send(request, **kwargs) File "/usr/lib/python2.6/site-packages/requests/adapters.py", line 415, in send raise ConnectionError(err, request=request) requests.exceptions.ConnectionError: ('Connection aborted.', error(101, 'Network is unreachable')) >>> try: ... requests.get('http://169.254.169.251/shoot') ... except requests.exceptions.RequestException: ... print "lol?" ... lol? --- _returners/splunk_nebula_return.py | 2 +- _returners/splunk_nova_return.py | 2 +- _returners/splunk_pulsar_return.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/_returners/splunk_nebula_return.py b/_returners/splunk_nebula_return.py index 17e78d8..43bb652 100644 --- a/_returners/splunk_nebula_return.py +++ b/_returners/splunk_nebula_return.py @@ -84,7 +84,7 @@ def returner(ret): timeout=1).text aws_account_id = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=1).json().get('accountId', 'unknown') - except (requests.exceptions.ConnectTimeout, ValueError): + except (requests.exceptions.RequestException, ValueError): # Not on an AWS box pass diff --git a/_returners/splunk_nova_return.py b/_returners/splunk_nova_return.py index 09cb23a..5ce98c0 100644 --- a/_returners/splunk_nova_return.py +++ b/_returners/splunk_nova_return.py @@ -83,7 +83,7 @@ def returner(ret): timeout=1).text aws_account_id = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=1).json().get('accountId', 'unknown') - 
except (requests.exceptions.ConnectTimeout, ValueError): + except (requests.exceptions.RequestException, ValueError): # Not on an AWS box pass diff --git a/_returners/splunk_pulsar_return.py b/_returners/splunk_pulsar_return.py index a4b68e8..b4ef95b 100644 --- a/_returners/splunk_pulsar_return.py +++ b/_returners/splunk_pulsar_return.py @@ -89,7 +89,7 @@ def returner(ret): timeout=1).text aws_account_id = requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document', timeout=1).json().get('accountId', 'unknown') - except (requests.exceptions.ConnectTimeout, ValueError): + except (requests.exceptions.RequestException, ValueError): # Not on an AWS box pass From 35ee5eaa2aee44ef2adf7756c7c637d9c39810bf Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Mon, 6 Mar 2017 15:36:57 -0700 Subject: [PATCH 17/18] Consolidate pillar and allow for multiple splunk endpoints https://github.com/hubblestack/quasar/pull/51 --- _returners/splunk_nebula_return.py | 203 ++++++++------- _returners/splunk_nova_return.py | 343 +++++++++++++------------- _returners/splunk_pulsar_return.py | 379 +++++++++++++++-------------- 3 files changed, 487 insertions(+), 438 deletions(-) diff --git a/_returners/splunk_nebula_return.py b/_returners/splunk_nebula_return.py index 43bb652..2855e7f 100644 --- a/_returners/splunk_nebula_return.py +++ b/_returners/splunk_nebula_return.py @@ -3,7 +3,6 @@ HubbleStack Nebula-to-Splunk returner :maintainer: HubbleStack -:maturity: 2016.10.4 :platform: All :requires: SaltStack @@ -13,13 +12,12 @@ .. code-block:: yaml hubblestack: - nebula: - returner: - splunk: - token: - indexer: - sourcetype: - index: + returner: + splunk: + - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + indexer: splunk-indexer.domain.tld + index: hubble + sourcetype_nebula: hubble_osquery You can also add an `custom_fields` argument which is a list of keys to add to events with using the results of config.get(). These new keys will be prefixed @@ -30,13 +28,12 @@ .. 
code-block:: yaml hubblestack: - nebula: - returner: - splunk: - token: - indexer: - sourcetype: - index: + returner: + splunk: + - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + indexer: splunk-indexer.domain.tld + index: hubble + sourcetype_nebula: hubble_osquery custom_fields: - site - product_group @@ -62,16 +59,7 @@ def returner(ret): - # Customized to split up the queries and extract the correct sourcetype - - opts = _get_options() - logging.info('Options: %s' % json.dumps(opts)) - http_event_collector_key = opts['token'] - http_event_collector_host = opts['indexer'] - hec_ssl = opts['http_event_server_ssl'] - proxy = opts['proxy'] - timeout = opts['timeout'] - custom_fields = opts['custom_fields'] + opts_list = _get_options() # Gather amazon information if present aws_ami_id = None @@ -88,79 +76,106 @@ def returner(ret): # Not on an AWS box pass - # Set up the collector - hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout) - - # st = 'salt:hubble:nova' - data = ret['return'] - minion_id = ret['id'] - jid = ret['jid'] - master = __grains__['master'] - fqdn = __grains__['fqdn'] - # Sometimes fqdn is blank. 
If it is, replace it with minion_id - fqdn = fqdn if fqdn else minion_id - try: - fqdn_ip4 = __grains__['fqdn_ip4'][0] - except IndexError: - fqdn_ip4 = __grains__['ipv4'][0] - - if not data: - return - else: - for query in data: - for query_name, query_results in query.iteritems(): - for query_result in query_results['data']: - event = {} - payload = {} - event.update(query_result) - event.update({'query': query_name}) - event.update({'job_id': jid}) - event.update({'master': master}) - event.update({'minion_id': minion_id}) - event.update({'dest_host': fqdn}) - event.update({'dest_ip': fqdn_ip4}) - - if aws_instance_id is not None: - event.update({'aws_ami_id': aws_ami_id}) - event.update({'aws_instance_id': aws_instance_id}) - event.update({'aws_account_id': aws_account_id}) - - for custom_field in custom_fields: - custom_field_name = 'custom_' + custom_field - custom_field_value = __salt__['config.get'](custom_field, '') - if isinstance(custom_field_value, str): - event.update({custom_field_name: custom_field_value}) - elif isinstance(custom_field_value, list): - custom_field_value = ','.join(custom_field_value) - event.update({custom_field_name: custom_field_value}) - - payload.update({'host': fqdn}) - payload.update({'index': opts['index']}) - payload.update({'sourcetype': opts['sourcetype']}) - payload.update({'event': event}) - hec.batchEvent(payload) - - hec.flushBatch() + for opts in opts_list: + logging.info('Options: %s' % json.dumps(opts)) + http_event_collector_key = opts['token'] + http_event_collector_host = opts['indexer'] + hec_ssl = opts['http_event_server_ssl'] + proxy = opts['proxy'] + timeout = opts['timeout'] + custom_fields = opts['custom_fields'] + + # Set up the collector + hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout) + + # st = 'salt:hubble:nova' + data = ret['return'] + minion_id = ret['id'] + jid = ret['jid'] + master = __grains__['master'] + 
fqdn = __grains__['fqdn'] + # Sometimes fqdn is blank. If it is, replace it with minion_id + fqdn = fqdn if fqdn else minion_id + try: + fqdn_ip4 = __grains__['fqdn_ip4'][0] + except IndexError: + fqdn_ip4 = __grains__['ipv4'][0] + + if not data: + return + else: + for query in data: + for query_name, query_results in query.iteritems(): + for query_result in query_results['data']: + event = {} + payload = {} + event.update(query_result) + event.update({'query': query_name}) + event.update({'job_id': jid}) + event.update({'master': master}) + event.update({'minion_id': minion_id}) + event.update({'dest_host': fqdn}) + event.update({'dest_ip': fqdn_ip4}) + + if aws_instance_id is not None: + event.update({'aws_ami_id': aws_ami_id}) + event.update({'aws_instance_id': aws_instance_id}) + event.update({'aws_account_id': aws_account_id}) + + for custom_field in custom_fields: + custom_field_name = 'custom_' + custom_field + custom_field_value = __salt__['config.get'](custom_field, '') + if isinstance(custom_field_value, str): + event.update({custom_field_name: custom_field_value}) + elif isinstance(custom_field_value, list): + custom_field_value = ','.join(custom_field_value) + event.update({custom_field_name: custom_field_value}) + + payload.update({'host': fqdn}) + payload.update({'index': opts['index']}) + payload.update({'sourcetype': opts['sourcetype']}) + payload.update({'event': event}) + hec.batchEvent(payload) + + hec.flushBatch() return def _get_options(): - try: - token = __salt__['config.get']('hubblestack:nebula:returner:splunk:token').strip() - indexer = __salt__['config.get']('hubblestack:nebula:returner:splunk:indexer') - sourcetype = __salt__['config.get']('hubblestack:nebula:returner:splunk:sourcetype') - index = __salt__['config.get']('hubblestack:nebula:returner:splunk:index') - custom_fields = __salt__['config.get']('hubblestack:nebula:returner:splunk:custom_fields', []) - except: - return None - splunk_opts = {'token': token, 'indexer': indexer, 
'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields} - - hec_ssl = __salt__['config.get']('hubblestack:nebula:returner:splunk:hec_ssl', True) - splunk_opts['http_event_server_ssl'] = hec_ssl - splunk_opts['proxy'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:proxy', {}) - splunk_opts['timeout'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:timeout', 9.05) - - return splunk_opts + if __salt__['config.get']('hubblestack:returner:splunk'): + splunk_opts = [] + returner_opts = __salt__['config.get']('hubblestack:returner:splunk') + if not isinstance(returner_opts, list): + returner_opts = [returner_opts] + for opt in returner_opts: + processed = {} + processed['token'] = opt.get('token') + processed['indexer'] = opt.get('indexer') + processed['index'] = opt.get('index') + processed['custom_fields'] = opt.get('custom_fields', []) + processed['sourcetype'] = opt.get('sourcetype_nebula', 'hubble_osquery') + processed['http_event_server_ssl'] = opt.get('hec_ssl', True) + processed['proxy'] = opt.get('proxy', {}) + processed['timeout'] = opt.get('timeout', 9.05) + splunk_opts.append(processed) + return splunk_opts + else: + try: + token = __salt__['config.get']('hubblestack:nebula:returner:splunk:token').strip() + indexer = __salt__['config.get']('hubblestack:nebula:returner:splunk:indexer') + sourcetype = __salt__['config.get']('hubblestack:nebula:returner:splunk:sourcetype') + index = __salt__['config.get']('hubblestack:nebula:returner:splunk:index') + custom_fields = __salt__['config.get']('hubblestack:nebula:returner:splunk:custom_fields', []) + except: + return None + splunk_opts = {'token': token, 'indexer': indexer, 'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields} + + hec_ssl = __salt__['config.get']('hubblestack:nebula:returner:splunk:hec_ssl', True) + splunk_opts['http_event_server_ssl'] = hec_ssl + splunk_opts['proxy'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:proxy', 
{}) + splunk_opts['timeout'] = __salt__['config.get']('hubblestack:nebula:returner:splunk:timeout', 9.05) + + return [splunk_opts] def send_splunk(event, index_override=None, sourcetype_override=None): diff --git a/_returners/splunk_nova_return.py b/_returners/splunk_nova_return.py index 5ce98c0..efed5fa 100644 --- a/_returners/splunk_nova_return.py +++ b/_returners/splunk_nova_return.py @@ -3,7 +3,6 @@ HubbleStack Nova-to-Splunk returner :maintainer: HubbleStack -:maturity: 2016.10.4 :platform: All :requires: SaltStack @@ -13,13 +12,12 @@ .. code-block:: yaml hubblestack: - nova: - returner: - splunk: - token: - indexer: - sourcetype: - index: + returner: + splunk: + - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + indexer: splunk-indexer.domain.tld + index: hubble + sourcetype_nova: hubble_audit You can also add an `custom_fields` argument which is a list of keys to add to events with using the results of config.get(). These new keys will be prefixed @@ -30,13 +28,12 @@ .. code-block:: yaml hubblestack: - nova: - returner: - splunk: - token: - indexer: - sourcetype: - index: + returner: + splunk: + - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + indexer: splunk-indexer.domain.tld + index: hubble + sourcetype_nova: hubble_audit custom_fields: - site - product_group @@ -62,15 +59,7 @@ def returner(ret): - # Customized to split up the queries and extract the correct sourcetype - opts = _get_options() - logging.info('Options: %s' % json.dumps(opts)) - http_event_collector_key = opts['token'] - http_event_collector_host = opts['indexer'] - hec_ssl = opts['http_event_server_ssl'] - proxy = opts['proxy'] - timeout = opts['timeout'] - custom_fields = opts['custom_fields'] + opts_list = _get_options() # Gather amazon information if present aws_ami_id = None @@ -87,131 +76,141 @@ def returner(ret): # Not on an AWS box pass - # Set up the collector - hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_server_ssl=hec_ssl, proxy=proxy, 
timeout=timeout) - # st = 'salt:hubble:nova' - data = ret['return'] - minion_id = ret['id'] - jid = ret['jid'] - fqdn = __grains__['fqdn'] - # Sometimes fqdn is blank. If it is, replace it with minion_id - fqdn = fqdn if fqdn else minion_id - master = __grains__['master'] - try: - fqdn_ip4 = __grains__['fqdn_ip4'][0] - except IndexError: - fqdn_ip4 = __grains__['ipv4'][0] - - if __grains__['master']: + for opts in opts_list: + logging.info('Options: %s' % json.dumps(opts)) + http_event_collector_key = opts['token'] + http_event_collector_host = opts['indexer'] + hec_ssl = opts['http_event_server_ssl'] + proxy = opts['proxy'] + timeout = opts['timeout'] + custom_fields = opts['custom_fields'] + + + # Set up the collector + hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout) + # st = 'salt:hubble:nova' + data = ret['return'] + minion_id = ret['id'] + jid = ret['jid'] + fqdn = __grains__['fqdn'] + # Sometimes fqdn is blank. 
If it is, replace it with minion_id + fqdn = fqdn if fqdn else minion_id master = __grains__['master'] - else: - master = socket.gethostname() # We *are* the master, so use our hostname - - for fai in data.get('Failure', []): - check_id = fai.keys()[0] - payload = {} - event = {} - event.update({'check_result': 'Failure'}) - event.update({'check_id': check_id}) - event.update({'job_id': jid}) - if not isinstance(fai[check_id], dict): - event.update({'description': fai[check_id]}) - elif 'description' in fai[check_id]: - for key, value in fai[check_id].iteritems(): - if key not in ['tag']: - event[key] = value - event.update({'master': master}) - event.update({'minion_id': minion_id}) - event.update({'dest_host': fqdn}) - event.update({'dest_ip': fqdn_ip4}) - - if aws_instance_id is not None: - event.update({'aws_ami_id': aws_ami_id}) - event.update({'aws_instance_id': aws_instance_id}) - - for custom_field in custom_fields: - custom_field_name = 'custom_' + custom_field - custom_field_value = __salt__['config.get'](custom_field, '') - if isinstance(custom_field_value, str): - event.update({custom_field_name: custom_field_value}) - elif isinstance(custom_field_value, list): - custom_field_value = ','.join(custom_field_value) - event.update({custom_field_name: custom_field_value}) - - payload.update({'host': fqdn}) - payload.update({'index': opts['index']}) - payload.update({'sourcetype': opts['sourcetype']}) - payload.update({'event': event}) - hec.batchEvent(payload) - - for suc in data.get('Success', []): - check_id = suc.keys()[0] - payload = {} - event = {} - event.update({'check_result': 'Success'}) - event.update({'check_id': check_id}) - event.update({'job_id': jid}) - if not isinstance(suc[check_id], dict): - event.update({'description': suc[check_id]}) - elif 'description' in suc[check_id]: - for key, value in suc[check_id].iteritems(): - if key not in ['tag']: - event[key] = value - event.update({'master': master}) - event.update({'minion_id': minion_id}) 
- event.update({'dest_host': fqdn}) - event.update({'dest_ip': fqdn_ip4}) - - if aws_instance_id is not None: - event.update({'aws_ami_id': aws_ami_id}) - event.update({'aws_instance_id': aws_instance_id}) - event.update({'aws_account_id': aws_account_id}) - - for custom_field in custom_fields: - custom_field_name = 'custom_' + custom_field - custom_field_value = __salt__['config.get'](custom_field, '') - if isinstance(custom_field_value, str): - event.update({custom_field_name: custom_field_value}) - elif isinstance(custom_field_value, list): - custom_field_value = ','.join(custom_field_value) - event.update({custom_field_name: custom_field_value}) - - payload.update({'host': fqdn}) - payload.update({'sourcetype': opts['sourcetype']}) - payload.update({'index': opts['index']}) - payload.update({'event': event}) - hec.batchEvent(payload) - - if data.get('Compliance', None): - payload = {} - event = {} - event.update({'job_id': jid}) - event.update({'compliance_percentage': data['Compliance']}) - event.update({'master': master}) - event.update({'minion_id': minion_id}) - event.update({'dest_host': fqdn}) - event.update({'dest_ip': fqdn_ip4}) - - if aws_instance_id is not None: - event.update({'aws_ami_id': aws_ami_id}) - event.update({'aws_instance_id': aws_instance_id}) - - for custom_field in custom_fields: - custom_field_name = 'custom_' + custom_field - custom_field_value = __salt__['config.get'](custom_field, '') - if isinstance(custom_field_value, str): - event.update({custom_field_name: custom_field_value}) - elif isinstance(custom_field_value, list): - custom_field_value = ','.join(custom_field_value) - event.update({custom_field_name: custom_field_value}) - - payload.update({'host': fqdn}) - payload.update({'sourcetype': opts['sourcetype']}) - payload.update({'index': opts['index']}) - payload.update({'event': event}) - hec.batchEvent(payload) + try: + fqdn_ip4 = __grains__['fqdn_ip4'][0] + except IndexError: + fqdn_ip4 = __grains__['ipv4'][0] - 
hec.flushBatch() + if __grains__['master']: + master = __grains__['master'] + else: + master = socket.gethostname() # We *are* the master, so use our hostname + + for fai in data.get('Failure', []): + check_id = fai.keys()[0] + payload = {} + event = {} + event.update({'check_result': 'Failure'}) + event.update({'check_id': check_id}) + event.update({'job_id': jid}) + if not isinstance(fai[check_id], dict): + event.update({'description': fai[check_id]}) + elif 'description' in fai[check_id]: + for key, value in fai[check_id].iteritems(): + if key not in ['tag']: + event[key] = value + event.update({'master': master}) + event.update({'minion_id': minion_id}) + event.update({'dest_host': fqdn}) + event.update({'dest_ip': fqdn_ip4}) + + if aws_instance_id is not None: + event.update({'aws_ami_id': aws_ami_id}) + event.update({'aws_instance_id': aws_instance_id}) + + for custom_field in custom_fields: + custom_field_name = 'custom_' + custom_field + custom_field_value = __salt__['config.get'](custom_field, '') + if isinstance(custom_field_value, str): + event.update({custom_field_name: custom_field_value}) + elif isinstance(custom_field_value, list): + custom_field_value = ','.join(custom_field_value) + event.update({custom_field_name: custom_field_value}) + + payload.update({'host': fqdn}) + payload.update({'index': opts['index']}) + payload.update({'sourcetype': opts['sourcetype']}) + payload.update({'event': event}) + hec.batchEvent(payload) + + for suc in data.get('Success', []): + check_id = suc.keys()[0] + payload = {} + event = {} + event.update({'check_result': 'Success'}) + event.update({'check_id': check_id}) + event.update({'job_id': jid}) + if not isinstance(suc[check_id], dict): + event.update({'description': suc[check_id]}) + elif 'description' in suc[check_id]: + for key, value in suc[check_id].iteritems(): + if key not in ['tag']: + event[key] = value + event.update({'master': master}) + event.update({'minion_id': minion_id}) + 
event.update({'dest_host': fqdn}) + event.update({'dest_ip': fqdn_ip4}) + + if aws_instance_id is not None: + event.update({'aws_ami_id': aws_ami_id}) + event.update({'aws_instance_id': aws_instance_id}) + event.update({'aws_account_id': aws_account_id}) + + for custom_field in custom_fields: + custom_field_name = 'custom_' + custom_field + custom_field_value = __salt__['config.get'](custom_field, '') + if isinstance(custom_field_value, str): + event.update({custom_field_name: custom_field_value}) + elif isinstance(custom_field_value, list): + custom_field_value = ','.join(custom_field_value) + event.update({custom_field_name: custom_field_value}) + + payload.update({'host': fqdn}) + payload.update({'sourcetype': opts['sourcetype']}) + payload.update({'index': opts['index']}) + payload.update({'event': event}) + hec.batchEvent(payload) + + if data.get('Compliance', None): + payload = {} + event = {} + event.update({'job_id': jid}) + event.update({'compliance_percentage': data['Compliance']}) + event.update({'master': master}) + event.update({'minion_id': minion_id}) + event.update({'dest_host': fqdn}) + event.update({'dest_ip': fqdn_ip4}) + + if aws_instance_id is not None: + event.update({'aws_ami_id': aws_ami_id}) + event.update({'aws_instance_id': aws_instance_id}) + + for custom_field in custom_fields: + custom_field_name = 'custom_' + custom_field + custom_field_value = __salt__['config.get'](custom_field, '') + if isinstance(custom_field_value, str): + event.update({custom_field_name: custom_field_value}) + elif isinstance(custom_field_value, list): + custom_field_value = ','.join(custom_field_value) + event.update({custom_field_name: custom_field_value}) + + payload.update({'host': fqdn}) + payload.update({'sourcetype': opts['sourcetype']}) + payload.update({'index': opts['index']}) + payload.update({'event': event}) + hec.batchEvent(payload) + + hec.flushBatch() return @@ -235,22 +234,40 @@ def event_return(event): def _get_options(): - try: - token = 
__salt__['config.get']('hubblestack:nova:returner:splunk:token').strip() - indexer = __salt__['config.get']('hubblestack:nova:returner:splunk:indexer') - sourcetype = __salt__['config.get']('hubblestack:nova:returner:splunk:sourcetype') - index = __salt__['config.get']('hubblestack:nova:returner:splunk:index') - custom_fields = __salt__['config.get']('hubblestack:nebula:returner:splunk:custom_fields', []) - except: - return None - splunk_opts = {'token': token, 'indexer': indexer, 'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields} - - hec_ssl = __salt__['config.get']('hubblestack:nova:returner:splunk:hec_ssl', True) - splunk_opts['http_event_server_ssl'] = hec_ssl - splunk_opts['proxy'] = __salt__['config.get']('hubblestack:nova:returner:splunk:proxy', {}) - splunk_opts['timeout'] = __salt__['config.get']('hubblestack:nova:returner:splunk:timeout', 9.05) - - return splunk_opts + if __salt__['config.get']('hubblestack:returner:splunk'): + splunk_opts = [] + returner_opts = __salt__['config.get']('hubblestack:returner:splunk') + if not isinstance(returner_opts, list): + returner_opts = [returner_opts] + for opt in returner_opts: + processed = {} + processed['token'] = opt.get('token') + processed['indexer'] = opt.get('indexer') + processed['index'] = opt.get('index') + processed['custom_fields'] = opt.get('custom_fields', []) + processed['sourcetype'] = opt.get('sourcetype_nova', 'hubble_audit') + processed['http_event_server_ssl'] = opt.get('hec_ssl', True) + processed['proxy'] = opt.get('proxy', {}) + processed['timeout'] = opt.get('timeout', 9.05) + splunk_opts.append(processed) + return splunk_opts + else: + try: + token = __salt__['config.get']('hubblestack:nova:returner:splunk:token').strip() + indexer = __salt__['config.get']('hubblestack:nova:returner:splunk:indexer') + sourcetype = __salt__['config.get']('hubblestack:nova:returner:splunk:sourcetype') + index = __salt__['config.get']('hubblestack:nova:returner:splunk:index') + 
custom_fields = __salt__['config.get']('hubblestack:nebula:returner:splunk:custom_fields', []) + except: + return None + splunk_opts = {'token': token, 'indexer': indexer, 'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields} + + hec_ssl = __salt__['config.get']('hubblestack:nova:returner:splunk:hec_ssl', True) + splunk_opts['http_event_server_ssl'] = hec_ssl + splunk_opts['proxy'] = __salt__['config.get']('hubblestack:nova:returner:splunk:proxy', {}) + splunk_opts['timeout'] = __salt__['config.get']('hubblestack:nova:returner:splunk:timeout', 9.05) + + return [splunk_opts] def send_splunk(event, index_override=None, sourcetype_override=None): diff --git a/_returners/splunk_pulsar_return.py b/_returners/splunk_pulsar_return.py index b4ef95b..194fbbb 100644 --- a/_returners/splunk_pulsar_return.py +++ b/_returners/splunk_pulsar_return.py @@ -3,7 +3,6 @@ HubbleStack Pulsar-to-Splunk returner :maintainer: HubbleStack -:maturity: 2016.10.4 :platform: All :requires: SaltStack @@ -13,13 +12,12 @@ .. code-block:: yaml hubblestack: - pulsar: - returner: - splunk: - token: - indexer: - sourcetype: - index: + returner: + splunk: + - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + indexer: splunk-indexer.domain.tld + index: hubble + sourcetype_pulsar: hubble_fim You can also add an `custom_fields` argument which is a list of keys to add to events with using the results of config.get(). These new keys will be prefixed @@ -30,13 +28,12 @@ .. code-block:: yaml hubblestack: - pulsar: - returner: - splunk: - token: - indexer: - sourcetype: - index: + returner: + splunk: + - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + indexer: splunk-indexer.domain.tld + index: hubble + sourcetype_pulsar: hubble_fim custom_fields: - site - product_group @@ -68,15 +65,8 @@ def returner(ret): if isinstance(ret, dict) and not ret.get('return'): # Empty single return, let's not do any setup or anything return - # Customized to split up the change events and send to Splunk. 
- opts = _get_options() - logging.info('Options: %s' % json.dumps(opts)) - http_event_collector_key = opts['token'] - http_event_collector_host = opts['indexer'] - hec_ssl = opts['http_event_server_ssl'] - proxy = opts['proxy'] - timeout = opts['timeout'] - custom_fields = opts['custom_fields'] + + opts_list = _get_options() # Gather amazon information if present aws_ami_id = None @@ -93,147 +83,156 @@ def returner(ret): # Not on an AWS box pass - # Set up the collector - hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout) - # Check whether or not data is batched: - if isinstance(ret, dict): # Batching is disabled - data = [ret] - else: - data = ret - # Sometimes there are duplicate events in the list. Dedup them: - data = _dedupList(data) - minion_id = __opts__['id'] - fqdn = __grains__['fqdn'] - # Sometimes fqdn is blank. If it is, replace it with minion_id - fqdn = fqdn if fqdn else minion_id - master = __grains__['master'] - try: - fqdn_ip4 = __grains__['fqdn_ip4'][0] - except IndexError: - fqdn_ip4 = __grains__['ipv4'][0] - - alerts = [] - for item in data: - events = item['return'] - if not isinstance(events, list): - events = [events] - alerts.extend(events) - - for alert in alerts: - event = {} - payload = {} - if('change' in alert): # Linux, normal pulsar - # The second half of the change will be '|IN_ISDIR' for directories - change = alert['change'].split('|')[0] - # Skip the IN_IGNORED events - if change == 'IN_IGNORED': - continue - if len(alert['change'].split('|')) == 2: - object_type = 'directory' - else: - object_type = 'file' - - actions = defaultdict(lambda: 'unknown') - actions['IN_ACCESS'] = 'read' - actions['IN_ATTRIB'] = 'acl_modified' - actions['IN_CLOSE_NOWRITE'] = 'read' - actions['IN_CLOSE_WRITE'] = 'read' - actions['IN_CREATE'] = 'created' - actions['IN_DELETE'] = 'deleted' - actions['IN_DELETE_SELF'] = 'deleted' - actions['IN_MODIFY'] = 'modified' - 
actions['IN_MOVE_SELF'] = 'modified' - actions['IN_MOVED_FROM'] = 'modified' - actions['IN_MOVED_TO'] = 'modified' - actions['IN_OPEN'] = 'read' - actions['IN_MOVE'] = 'modified' - actions['IN_CLOSE'] = 'read' - - event['action'] = actions[change] - event['change_type'] = 'filesystem' - event['object_category'] = object_type - event['object_path'] = alert['path'] - event['file_name'] = alert['name'] - event['file_path'] = alert['tag'] - - if alert['stats']: # Gather more data if the change wasn't a delete - stats = alert['stats'] - event['object_id'] = stats['inode'] - event['file_acl'] = stats['mode'] - event['file_create_time'] = stats['ctime'] - event['file_modify_time'] = stats['mtime'] - event['file_size'] = stats['size'] / 1024.0 # Convert bytes to kilobytes - event['user'] = stats['user'] - event['group'] = stats['group'] - if object_type == 'file': - event['file_hash'] = alert['checksum'] - event['file_hash_type'] = alert['checksum_type'] - - else: # Windows, win_pulsar - change = alert['Accesses'] - if alert['Hash'] == 'Item is a directory': - object_type = 'directory' - else: - object_type = 'file' - - actions = defaultdict(lambda: 'unknown') - actions['Delete'] = 'deleted' - actions['Read Control'] = 'read' - actions['Write DAC'] = 'acl_modified' - actions['Write Owner'] = 'modified' - actions['Synchronize'] = 'modified' - actions['Access Sys Sec'] = 'read' - actions['Read Data'] = 'read' - actions['Write Data'] = 'modified' - actions['Append Data'] = 'modified' - actions['Read EA'] = 'read' - actions['Write EA'] = 'modified' - actions['Execute/Traverse'] = 'read' - actions['Read Attributes'] = 'read' - actions['Write Attributes'] = 'acl_modified' - actions['Query Key Value'] = 'read' - actions['Set Key Value'] = 'modified' - actions['Create Sub Key'] = 'created' - actions['Enumerate Sub-Keys'] = 'read' - actions['Notify About Changes to Keys'] = 'read' - actions['Create Link'] = 'created' - actions['Print'] = 'read' - - event['action'] = actions[change] 
- event['change_type'] = 'filesystem' - event['object_category'] = object_type - event['object_path'] = alert['Object Name'] - event['file_name'] = os.path.basename(alert['Object Name']) - event['file_path'] = os.path.dirname(alert['Object Name']) - # TODO: Should we be reporting 'EntryType' or 'TimeGenerated? - # EntryType reports whether attempt to change was successful. - - event.update({'master': master}) - event.update({'minion_id': minion_id}) - event.update({'dest_host': fqdn}) - event.update({'dest_ip': fqdn_ip4}) - - if aws_instance_id is not None: - event.update({'aws_ami_id': aws_ami_id}) - event.update({'aws_instance_id': aws_instance_id}) - event.update({'aws_account_id': aws_account_id}) - - for custom_field in custom_fields: - custom_field_name = 'custom_' + custom_field - custom_field_value = __salt__['config.get'](custom_field, '') - if isinstance(custom_field_value, str): - event.update({custom_field_name: custom_field_value}) - elif isinstance(custom_field_value, list): - custom_field_value = ','.join(custom_field_value) - event.update({custom_field_name: custom_field_value}) - - payload.update({'host': fqdn}) - payload.update({'index': opts['index']}) - payload.update({'sourcetype': opts['sourcetype']}) - payload.update({'event': event}) - hec.batchEvent(payload) - - hec.flushBatch() + for opts in opts_list: + logging.info('Options: %s' % json.dumps(opts)) + http_event_collector_key = opts['token'] + http_event_collector_host = opts['indexer'] + hec_ssl = opts['http_event_server_ssl'] + proxy = opts['proxy'] + timeout = opts['timeout'] + custom_fields = opts['custom_fields'] + + # Set up the collector + hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout) + # Check whether or not data is batched: + if isinstance(ret, dict): # Batching is disabled + data = [ret] + else: + data = ret + # Sometimes there are duplicate events in the list. 
Dedup them: + data = _dedupList(data) + minion_id = __opts__['id'] + fqdn = __grains__['fqdn'] + # Sometimes fqdn is blank. If it is, replace it with minion_id + fqdn = fqdn if fqdn else minion_id + master = __grains__['master'] + try: + fqdn_ip4 = __grains__['fqdn_ip4'][0] + except IndexError: + fqdn_ip4 = __grains__['ipv4'][0] + + alerts = [] + for item in data: + events = item['return'] + if not isinstance(events, list): + events = [events] + alerts.extend(events) + + for alert in alerts: + event = {} + payload = {} + if('change' in alert): # Linux, normal pulsar + # The second half of the change will be '|IN_ISDIR' for directories + change = alert['change'].split('|')[0] + # Skip the IN_IGNORED events + if change == 'IN_IGNORED': + continue + if len(alert['change'].split('|')) == 2: + object_type = 'directory' + else: + object_type = 'file' + + actions = defaultdict(lambda: 'unknown') + actions['IN_ACCESS'] = 'read' + actions['IN_ATTRIB'] = 'acl_modified' + actions['IN_CLOSE_NOWRITE'] = 'read' + actions['IN_CLOSE_WRITE'] = 'read' + actions['IN_CREATE'] = 'created' + actions['IN_DELETE'] = 'deleted' + actions['IN_DELETE_SELF'] = 'deleted' + actions['IN_MODIFY'] = 'modified' + actions['IN_MOVE_SELF'] = 'modified' + actions['IN_MOVED_FROM'] = 'modified' + actions['IN_MOVED_TO'] = 'modified' + actions['IN_OPEN'] = 'read' + actions['IN_MOVE'] = 'modified' + actions['IN_CLOSE'] = 'read' + + event['action'] = actions[change] + event['change_type'] = 'filesystem' + event['object_category'] = object_type + event['object_path'] = alert['path'] + event['file_name'] = alert['name'] + event['file_path'] = alert['tag'] + + if alert['stats']: # Gather more data if the change wasn't a delete + stats = alert['stats'] + event['object_id'] = stats['inode'] + event['file_acl'] = stats['mode'] + event['file_create_time'] = stats['ctime'] + event['file_modify_time'] = stats['mtime'] + event['file_size'] = stats['size'] / 1024.0 # Convert bytes to kilobytes + event['user'] = 
stats['user'] + event['group'] = stats['group'] + if object_type == 'file': + event['file_hash'] = alert['checksum'] + event['file_hash_type'] = alert['checksum_type'] + + else: # Windows, win_pulsar + change = alert['Accesses'] + if alert['Hash'] == 'Item is a directory': + object_type = 'directory' + else: + object_type = 'file' + + actions = defaultdict(lambda: 'unknown') + actions['Delete'] = 'deleted' + actions['Read Control'] = 'read' + actions['Write DAC'] = 'acl_modified' + actions['Write Owner'] = 'modified' + actions['Synchronize'] = 'modified' + actions['Access Sys Sec'] = 'read' + actions['Read Data'] = 'read' + actions['Write Data'] = 'modified' + actions['Append Data'] = 'modified' + actions['Read EA'] = 'read' + actions['Write EA'] = 'modified' + actions['Execute/Traverse'] = 'read' + actions['Read Attributes'] = 'read' + actions['Write Attributes'] = 'acl_modified' + actions['Query Key Value'] = 'read' + actions['Set Key Value'] = 'modified' + actions['Create Sub Key'] = 'created' + actions['Enumerate Sub-Keys'] = 'read' + actions['Notify About Changes to Keys'] = 'read' + actions['Create Link'] = 'created' + actions['Print'] = 'read' + + event['action'] = actions[change] + event['change_type'] = 'filesystem' + event['object_category'] = object_type + event['object_path'] = alert['Object Name'] + event['file_name'] = os.path.basename(alert['Object Name']) + event['file_path'] = os.path.dirname(alert['Object Name']) + # TODO: Should we be reporting 'EntryType' or 'TimeGenerated? + # EntryType reports whether attempt to change was successful. 
+ + event.update({'master': master}) + event.update({'minion_id': minion_id}) + event.update({'dest_host': fqdn}) + event.update({'dest_ip': fqdn_ip4}) + + if aws_instance_id is not None: + event.update({'aws_ami_id': aws_ami_id}) + event.update({'aws_instance_id': aws_instance_id}) + event.update({'aws_account_id': aws_account_id}) + + for custom_field in custom_fields: + custom_field_name = 'custom_' + custom_field + custom_field_value = __salt__['config.get'](custom_field, '') + if isinstance(custom_field_value, str): + event.update({custom_field_name: custom_field_value}) + elif isinstance(custom_field_value, list): + custom_field_value = ','.join(custom_field_value) + event.update({custom_field_name: custom_field_value}) + + payload.update({'host': fqdn}) + payload.update({'index': opts['index']}) + payload.update({'sourcetype': opts['sourcetype']}) + payload.update({'event': event}) + hec.batchEvent(payload) + + hec.flushBatch() return @@ -246,22 +245,40 @@ def _dedupList(l): def _get_options(): - try: - token = __salt__['config.get']('hubblestack:pulsar:returner:splunk:token').strip() - indexer = __salt__['config.get']('hubblestack:pulsar:returner:splunk:indexer') - sourcetype = __salt__['config.get']('hubblestack:pulsar:returner:splunk:sourcetype') - index = __salt__['config.get']('hubblestack:pulsar:returner:splunk:index') - custom_fields = __salt__['config.get']('hubblestack:nebula:returner:splunk:custom_fields', []) - except: - return None - splunk_opts = {'token': token, 'indexer': indexer, 'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields} - - hec_ssl = __salt__['config.get']('hubblestack:pulsar:returner:splunk:hec_ssl', True) - splunk_opts['http_event_server_ssl'] = hec_ssl - splunk_opts['proxy'] = __salt__['config.get']('hubblestack:pulsar:returner:splunk:proxy', {}) - splunk_opts['timeout'] = __salt__['config.get']('hubblestack:pulsar:returner:splunk:timeout', 9.05) - - return splunk_opts + if 
__salt__['config.get']('hubblestack:returner:splunk'): + splunk_opts = [] + returner_opts = __salt__['config.get']('hubblestack:returner:splunk') + if not isinstance(returner_opts, list): + returner_opts = [returner_opts] + for opt in returner_opts: + processed = {} + processed['token'] = opt.get('token') + processed['indexer'] = opt.get('indexer') + processed['index'] = opt.get('index') + processed['custom_fields'] = opt.get('custom_fields', []) + processed['sourcetype'] = opt.get('sourcetype_pulsar', 'hubble_fim') + processed['http_event_server_ssl'] = opt.get('hec_ssl', True) + processed['proxy'] = opt.get('proxy', {}) + processed['timeout'] = opt.get('timeout', 9.05) + splunk_opts.append(processed) + return splunk_opts + else: + try: + token = __salt__['config.get']('hubblestack:pulsar:returner:splunk:token').strip() + indexer = __salt__['config.get']('hubblestack:pulsar:returner:splunk:indexer') + sourcetype = __salt__['config.get']('hubblestack:pulsar:returner:splunk:sourcetype') + index = __salt__['config.get']('hubblestack:pulsar:returner:splunk:index') + custom_fields = __salt__['config.get']('hubblestack:nebula:returner:splunk:custom_fields', []) + except: + return None + splunk_opts = {'token': token, 'indexer': indexer, 'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields} + + hec_ssl = __salt__['config.get']('hubblestack:pulsar:returner:splunk:hec_ssl', True) + splunk_opts['http_event_server_ssl'] = hec_ssl + splunk_opts['proxy'] = __salt__['config.get']('hubblestack:pulsar:returner:splunk:proxy', {}) + splunk_opts['timeout'] = __salt__['config.get']('hubblestack:pulsar:returner:splunk:timeout', 9.05) + + return [splunk_opts] # Thanks to George Starcher for the http_event_collector class (https://github.com/georgestarcher/) From 63f1e0d1bea15a7a7e0340bccab90dc9d47f5992 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Mon, 6 Mar 2017 15:42:45 -0700 Subject: [PATCH 18/18] Rev to v2017.3.1 --- README.md | 2 +- _beacons/pulsar.py | 2 
+- _beacons/win_pulsar.py | 2 +- _modules/hubble.py | 2 +- _modules/nebula_osquery.py | 2 +- _returners/slack_pulsar_returner.py | 2 +- _returners/splunk_nebula_return.py | 2 +- _returners/splunk_nova_return.py | 2 +- _returners/splunk_pulsar_return.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 66faacc..33a9a30 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ fileserver_backend: - git gitfs_remotes: - https://github.com/hubblestack/hubble-salt.git: - - base: v2017.1.0 + - base: v2017.3.1 - root: '' ``` diff --git a/_beacons/pulsar.py b/_beacons/pulsar.py index 23ec36d..0e96cc5 100644 --- a/_beacons/pulsar.py +++ b/_beacons/pulsar.py @@ -39,7 +39,7 @@ DEFAULT_MASK = None __virtualname__ = 'pulsar' -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' CONFIG = None CONFIG_STALENESS = 0 diff --git a/_beacons/win_pulsar.py b/_beacons/win_pulsar.py index 2cb6f14..d685e18 100644 --- a/_beacons/win_pulsar.py +++ b/_beacons/win_pulsar.py @@ -25,7 +25,7 @@ DEFAULT_TYPE = 'all' __virtualname__ = 'pulsar' -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' CONFIG = None CONFIG_STALENESS = 0 diff --git a/_modules/hubble.py b/_modules/hubble.py index 989811d..9b1337f 100644 --- a/_modules/hubble.py +++ b/_modules/hubble.py @@ -35,7 +35,7 @@ from salt.loader import LazyLoader __nova__ = {} -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' def audit(configs=None, diff --git a/_modules/nebula_osquery.py b/_modules/nebula_osquery.py index b90a6fd..004d562 100644 --- a/_modules/nebula_osquery.py +++ b/_modules/nebula_osquery.py @@ -40,7 +40,7 @@ log = logging.getLogger(__name__) -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' __virtualname__ = 'nebula' diff --git a/_returners/slack_pulsar_returner.py b/_returners/slack_pulsar_returner.py index fa68adf..02c6c5c 100644 --- a/_returners/slack_pulsar_returner.py +++ b/_returners/slack_pulsar_returner.py @@ -69,7 +69,7 @@ # Import Salt Libs import salt.returners 
-__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' log = logging.getLogger(__name__) diff --git a/_returners/splunk_nebula_return.py b/_returners/splunk_nebula_return.py index 2855e7f..4750d66 100644 --- a/_returners/splunk_nebula_return.py +++ b/_returners/splunk_nebula_return.py @@ -47,7 +47,7 @@ import logging -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' _max_content_bytes = 100000 http_event_collector_SSL_verify = False diff --git a/_returners/splunk_nova_return.py b/_returners/splunk_nova_return.py index efed5fa..d1d02a1 100644 --- a/_returners/splunk_nova_return.py +++ b/_returners/splunk_nova_return.py @@ -47,7 +47,7 @@ import logging -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' _max_content_bytes = 100000 http_event_collector_SSL_verify = False diff --git a/_returners/splunk_pulsar_return.py b/_returners/splunk_pulsar_return.py index 194fbbb..4254bb9 100644 --- a/_returners/splunk_pulsar_return.py +++ b/_returners/splunk_pulsar_return.py @@ -50,7 +50,7 @@ import logging -__version__ = 'v2017.1.1' +__version__ = 'v2017.3.1' _max_content_bytes = 100000 http_event_collector_SSL_verify = False