From 7f5efe916561ee318222740de1fdb9dbfdbe2257 Mon Sep 17 00:00:00 2001
From: Julien Richard
Date: Sun, 4 Aug 2019 21:05:28 +0200
Subject: [PATCH] New documentation version for the new release

---
 opencti-documentation/website/package.json    |   4 +-
 .../getting-started/requirements.md           |  84 ++++++++++++
 .../version-1.1.1/installation/connectors.md  |  97 +++++++++++++
 .../version-1.1.1/installation/docker.md      | 114 ++++++++++++++++
 .../version-1.1.1/installation/manual.md      | 127 ++++++++++++++++++
 .../version-1.1.1/reference/inferences.md     |  86 ++++++++++++
 opencti-documentation/website/versions.json   |   1 +
 7 files changed, 511 insertions(+), 2 deletions(-)
 create mode 100644 opencti-documentation/website/versioned_docs/version-1.1.1/getting-started/requirements.md
 create mode 100644 opencti-documentation/website/versioned_docs/version-1.1.1/installation/connectors.md
 create mode 100644 opencti-documentation/website/versioned_docs/version-1.1.1/installation/docker.md
 create mode 100644 opencti-documentation/website/versioned_docs/version-1.1.1/installation/manual.md
 create mode 100644 opencti-documentation/website/versioned_docs/version-1.1.1/reference/inferences.md

diff --git a/opencti-documentation/website/package.json b/opencti-documentation/website/package.json
index 9b62c57849aa..0b29481d32f9 100644
--- a/opencti-documentation/website/package.json
+++ b/opencti-documentation/website/package.json
@@ -1,4 +1,5 @@
 {
+  "version": "1.1.1",
   "scripts": {
     "examples": "docusaurus-examples",
     "start": "docusaurus-start",
@@ -10,6 +11,5 @@
   },
   "devDependencies": {
     "docusaurus": "^1.9.0"
-  },
-  "version": "1.1.0"
+  }
 }
diff --git a/opencti-documentation/website/versioned_docs/version-1.1.1/getting-started/requirements.md b/opencti-documentation/website/versioned_docs/version-1.1.1/getting-started/requirements.md
new file mode 100644
index 000000000000..82e7eaa039df
--- /dev/null
+++ b/opencti-documentation/website/versioned_docs/version-1.1.1/getting-started/requirements.md
@@ -0,0 +1,84 @@
---
id: version-1.1.1-requirements
title: Infrastructure requirements
sidebar_label: Infrastructure requirements
original_id: requirements
---

Since OpenCTI has several dependencies, you can find below the minimum configuration and amount of resources needed to launch the OpenCTI platform.

## Total requirements

The minimal hardware requirements for all components of the platform, including the databases, are:

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|------------------------------------|
| 6 cores | 16GB | SSD (recommended) / Normal | Depending on your content (> 32GB) |

## Databases

### Grakn

Grakn is composed of 2 Java processes, one for Grakn itself and the other one for Cassandra. Each process requires a minimum of 4GB of memory, so Grakn needs:

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|------------------------------------|
| 2 cores | 8GB | SSD | Depending on your content (> 16GB) |

> In order to set up the Java memory allocation, you can use the environment variables `SERVER_JAVAOPTS` and `STORAGE_JAVAOPTS`. You can find more information in the [official Grakn documentation](https://dev.grakn.ai/docs).

### ElasticSearch

ElasticSearch is also a Java process that needs a minimal amount of memory and CPU.
| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|------------------------------------|
| 2 cores | 1GB | Normal | Depending on your content (> 16GB) |

> In order to set up the Java memory allocation, you can use the environment variable `ES_JAVA_OPTS`. You can find more information in the [official ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).

### Redis

Redis has a very small footprint and only needs a tiny configuration:

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|-----------------------------------|
| 1 core | 128MB | Normal | 128MB |

> You can use the option `--maxmemory` to limit the usage. You can find more information in the [Redis Docker hub](https://hub.docker.com/r/bitnami/redis/).

### RabbitMQ

RabbitMQ has a very small footprint and can store messages directly on the disk if it does not have enough memory.

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|-----------------------------------|
| 1 core | 128MB | Normal | 128MB |

> The RabbitMQ memory configuration can be found in the [RabbitMQ official documentation](https://www.rabbitmq.com/memory.html).

### Total

The requirements for the database infrastructure of OpenCTI are:

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|------------------------------------|
| 4 cores | 12GB | SSD (recommended) / Normal | Depending on your content (> 32GB) |

## Application

### Platform

The OpenCTI platform is based on a NodeJS runtime, with a memory limit of **512MB by default**.

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|-----------------------------------|
| 1 core | 512MB | Normal | 256MB |

### Workers and connectors

OpenCTI workers and connectors are Python processes with a very small footprint. For each connector, the requirements are:

| CPU | RAM | Disk type | Disk space |
| ------------- |---------------| ---------------------------|-----------------------------------|
| 1 core | 128MB | Normal | 128MB |
\ No newline at end of file
diff --git a/opencti-documentation/website/versioned_docs/version-1.1.1/installation/connectors.md b/opencti-documentation/website/versioned_docs/version-1.1.1/installation/connectors.md
new file mode 100644
index 000000000000..0b7604b30088
--- /dev/null
+++ b/opencti-documentation/website/versioned_docs/version-1.1.1/installation/connectors.md
@@ -0,0 +1,97 @@
---
id: version-1.1.1-connectors
title: Connectors activation
sidebar_label: Enable connectors
original_id: connectors
---

## Introduction

Connectors are standalone processes that are independent of the rest of the platform. They use RabbitMQ to push data to OpenCTI, through a dedicated queue for each connector instance. Depending on your deployment, you can enable connectors by using the connector Docker images or by launching them manually.

## Connector configurations

All connectors have 2 mandatory configuration parameters: the `name` and the `confidence_level`. The `name` is the name of the instance of the connector. For instance, for the MISP connector, you can launch as many MISP connectors as you need if you want to pull data from multiple MISP instances.

> The `name` of each connector instance must be unique.
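For a manual deployment, these two parameters usually end up in the connector's `config.yml`. The snippet below is only an illustrative sketch: the section and key names (`misp`, `name`, `confidence_level`, `url`, `key`) are assumptions based on the MISP example used on this page, so always start from the `config.yml.sample` shipped with the connector you deploy.

```yaml
# Hypothetical excerpt of a MISP connector config.yml (illustrative only,
# refer to the connector's config.yml.sample for the real key names).
misp:
  name: 'MISP Circle'       # unique name of this connector instance
  confidence_level: 3       # confidence_level applied to relationships created by this connector
  url: 'http://localhost'
  key: 'ChangeMe'
```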
> The `confidence_level` of the connector will be used to set the `confidence_level` of the relationships created by the connector. If a connector needs to create a relationship that already exists, it will check the current `confidence_level`: if it is lower than its own, it will update the relationship with the new information; if it is higher, it will keep the existing relationship unchanged.

## Docker activation

You can either run the connector Docker images directly or add them to your current `docker-compose.yml` file.

### Add a connector to your deployment

For instance, to enable the MISP connector, you can add a new service to your `docker-compose.yml` file:

```
  connector-misp:
    image: opencti/connector-misp:{RELEASE_VERSION}
    environment:
      - RABBITMQ_HOSTNAME=localhost
      - RABBITMQ_PORT=5672
      - RABBITMQ_USERNAME=guest
      - RABBITMQ_PASSWORD=guest
      - MISP_NAME=MISP\ Circle
      - MISP_CONFIDENCE_LEVEL=3
      - MISP_URL=http://localhost
      - MISP_KEY=ChangeMe
      - MISP_TAG=OpenCTI:\ Import
      - MISP_UNTAG_EVENT=true
      - MISP_IMPORTED_TAG=OpenCTI:\ Imported
      - MISP_INTERVAL=1 # Minutes
      - MISP_LOG_LEVEL=info
    restart: always
```

### Launch a standalone connector

To launch a standalone connector, you can use the `docker-compose.yml` file of the connector itself. Just download the [release](https://github.com/OpenCTI-Platform/connectors/archive/{RELEASE_VERSION}.zip) and start the connector:

```
$ wget https://github.com/OpenCTI-Platform/connectors/archive/{RELEASE_VERSION}.zip
$ unzip {RELEASE_VERSION}.zip
$ cd connectors-{RELEASE_VERSION}/misp/
```

Change the configuration in the `docker-compose.yml` according to the parameters of the platform and of the targeted service. RabbitMQ credentials are the only parameters that the connector needs to send data to OpenCTI. Then launch the connector:

```
$ docker-compose up
```

## Manual activation

If you want to launch a connector manually, you just have to install Python 3 and pip3 for the dependencies:

```
$ apt install python3 python3-pip
```

Download the [release](https://github.com/OpenCTI-Platform/connectors/archive/{RELEASE_VERSION}.zip) of the connectors:

```
$ wget https://github.com/OpenCTI-Platform/connectors/archive/{RELEASE_VERSION}.zip
$ unzip {RELEASE_VERSION}.zip
$ cd connectors-{RELEASE_VERSION}/misp/src/
```

Install the dependencies and initialize the configuration:

```
$ pip3 install -r requirements.txt
$ cp config.yml.sample config.yml
```

Change the `config.yml` content according to the parameters of the platform and of the targeted service, then launch the connector:

```
$ python3 misp.py
```

## Connectors status

The connectors status can be displayed in the dedicated section of the platform, where you will be able to see the statistics of the RabbitMQ queue of each connector:

![Connectors status](assets/installation/connectors_status.png "Connectors status")
\ No newline at end of file
diff --git a/opencti-documentation/website/versioned_docs/version-1.1.1/installation/docker.md b/opencti-documentation/website/versioned_docs/version-1.1.1/installation/docker.md
new file mode 100644
index 000000000000..b9c54048455e
--- /dev/null
+++ b/opencti-documentation/website/versioned_docs/version-1.1.1/installation/docker.md
@@ -0,0 +1,114 @@
---
id: version-1.1.1-docker
title: Docker installation
sidebar_label: Using Docker
original_id: docker
---

OpenCTI can be deployed using the *docker-compose* command.
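Before cloning anything, it can be worth a quick check that the Docker tooling is available on the host, and what the current `vm.max_map_count` value is (this kernel setting is raised later in this guide for ElasticSearch):

```bash
# Sanity check of the Docker tooling before deploying OpenCTI
$ docker --version
$ docker-compose --version

# Current value of vm.max_map_count (ElasticSearch needs at least 262144, see below)
$ sysctl vm.max_map_count
```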
## Clone the repository

```bash
$ mkdir /path/to/your/app && cd /path/to/your/app
$ git clone https://github.com/OpenCTI-Platform/docker.git
$ cd docker
```

### Configure the environment

Before running the docker-compose command, please change the admin token (this token must be a [valid UUID](https://www.uuidgenerator.net/)) and the password of the application in the file `docker-compose.yml`:

```yaml
- APP__ADMIN__PASSWORD=ChangeMe
- APP__ADMIN__TOKEN=ChangeMe
```

Then change the variable `OPENCTI_TOKEN` (for `worker-import` and `worker-export`) according to the value of `APP__ADMIN__TOKEN`:

```yaml
- OPENCTI_TOKEN=ChangeMe
```

As OpenCTI has a dependency on ElasticSearch, you have to set `vm.max_map_count` before running the containers, as mentioned in the [ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-prod-mode).

```bash
$ sysctl -w vm.max_map_count=262144
```

## Run

In order to have the best experience with Docker, we recommend using the Docker stack feature. In this mode, you will be able to easily scale your deployment.

### In Swarm or Kubernetes

```bash
$ docker stack deploy -c docker-compose.yml opencti
```

### In standard Docker

```bash
$ docker-compose --compatibility up
```

You can now go to http://localhost:8080 and log in with the credentials configured in your environment variables.

## Data persistence

If you wish your OpenCTI data to be persistent in production, you should be aware of the `volumes` section for both the `Grakn` and `ElasticSearch` services in the `docker-compose.yml`.

Here is an example of volumes configuration:

```yaml
volumes:
  grakndata:
    driver: local
    driver_opts:
      o: bind
      type: none
  esdata:
    driver: local
    driver_opts:
      o: bind
      type: none
```

## Memory configuration

The default OpenCTI `docker-compose.yml` file does not provide any specific memory configuration. If you want to adapt the configuration of some dependencies, you can find some guidance below.

### OpenCTI - Platform

The OpenCTI platform is based on a NodeJS runtime, with a memory limit of **512MB by default**. We do not provide any option to change this limit today. If you encounter any `OutOfMemory` exception, please open a [Github issue](https://github.com/OpenCTI-Platform/opencti/issues/new?assignees=&labels=&template=bug_report.md&title=).

### OpenCTI - Workers and connectors

OpenCTI workers and connectors are Python processes. If you want to limit the memory of these processes, we recommend using Docker directly to do so. You can find more information in the [official Docker documentation](https://docs.docker.com/compose/compose-file/).

> If you do not use Docker stack, think about the `--compatibility` option.

### Grakn

Grakn is a Java process that relies on Cassandra (also a Java process). In order to set up the Java memory allocation, you can use the environment variables `SERVER_JAVAOPTS` and `STORAGE_JAVAOPTS`.

> The current recommendation is `-Xms4G` for both options.

You can find more information in the [official Grakn documentation](https://dev.grakn.ai/docs).

### ElasticSearch

ElasticSearch is also a Java process. In order to set up the Java memory allocation, you can use the environment variable `ES_JAVA_OPTS`.

> The minimal recommended option today is `-Xms512M -Xmx512M`.
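If you want to pin this explicitly in a Docker deployment, the variable can be set on the ElasticSearch service of your `docker-compose.yml`. This is a minimal sketch only: the service name and the image tag are assumptions and should be adapted to the file shipped in the OpenCTI Docker repository.

```yaml
  elasticsearch:                     # assumed service name, check your docker-compose.yml
    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.1   # illustrative image tag
    environment:
      - ES_JAVA_OPTS=-Xms512M -Xmx512M   # minimal recommended heap, adjust to your content volume
```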
You can find more information in the [official ElasticSearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).

### Redis

Redis has a very small footprint and only provides an option to limit the maximum amount of memory that can be used by the process. You can use the option `--maxmemory` to limit the usage.

You can find more information in the [Redis Docker hub](https://hub.docker.com/r/bitnami/redis/).

### RabbitMQ

The RabbitMQ memory configuration can be found in the [RabbitMQ official documentation](https://www.rabbitmq.com/memory.html). Basically, RabbitMQ will consume memory up to a specific threshold, so it should be configured along with the Docker memory limitation.
diff --git a/opencti-documentation/website/versioned_docs/version-1.1.1/installation/manual.md b/opencti-documentation/website/versioned_docs/version-1.1.1/installation/manual.md
new file mode 100644
index 000000000000..3a06e7f512e5
--- /dev/null
+++ b/opencti-documentation/website/versioned_docs/version-1.1.1/installation/manual.md
@@ -0,0 +1,127 @@
---
id: version-1.1.1-manual
title: Manual installation
sidebar_label: Manual deployment
original_id: manual
---

## Prerequisites

- NodeJS (>= 10)
- Grakn (>= 1.5.7)
- Redis (>= 3.0)
- ElasticSearch (== 6.x.x)
- RabbitMQ (>= 3.7)

## Prepare the installation

### Installation of dependencies

You have to install all the needed dependencies for the main application and the workers. The example below is for Ubuntu:

```bash
$ sudo apt-get install nodejs npm python3 python3-pip
```

### Download the application files

Download and extract the latest release file:

```bash
$ mkdir /path/to/your/app && cd /path/to/your/app
$ wget https://github.com/OpenCTI-Platform/opencti/releases/download/{RELEASE_VERSION}/opencti-release.tar.gz
$ tar xvfz opencti-release.tar.gz
```

## Install the main platform

### Configure the application

The main application has just one JSON configuration file to change:

```bash
$ cd opencti
$ cp config/default.json config/production.json
```

Change the *config/production.json* file according to your configuration of Grakn, Redis, ElasticSearch and RabbitMQ, and set the default credentials (the `ADMIN_TOKEN` must be a [valid UUID](https://www.uuidgenerator.net/)).

### Database schema and initial data

After the configuration, you can create your database schema and add the initial data:

```bash
$ npm run schema
$ npm run migrate
```

### Start the application

The application is just a NodeJS process:

```bash
$ node dist/server.js &
```

The default username and password are those you put in the `config/production.json` file.

## Install the workers

Two different workers must be configured to allow the platform to import and export data: one for import and the other for export.

### Install the import worker

#### Configure the import worker

Just copy the worker directory to a new one, named `worker-import`:

```bash
$ cp -a worker worker-import
$ cd worker-import
$ cp config.yml.sample config.yml
```

Change the *config.yml* file according to your OpenCTI token and your RabbitMQ configuration.

> The worker type must be set to "import".

#### Start as many workers as you need

```bash
$ python3 worker.py &
$ python3 worker.py &
```

### Install the export worker

#### Configure the export worker

Just copy the worker directory to a new one, named `worker-export`:

```bash
$ cd ..
$ cp -a worker worker-export
$ cd worker-export
$ cp config.yml.sample config.yml
```

Change the *config.yml* file according to your OpenCTI token and your RabbitMQ configuration.

> The worker type must be set to "export".

#### Start as many workers as you need

```bash
$ python3 worker.py &
$ python3 worker.py &
```

## Upgrade the platform

When upgrading the platform, you have to replace all the files, then run the schema and migration commands to apply the updates:

```bash
$ npm run schema
$ npm run migrate
```

Then start the platform.
\ No newline at end of file
diff --git a/opencti-documentation/website/versioned_docs/version-1.1.1/reference/inferences.md b/opencti-documentation/website/versioned_docs/version-1.1.1/reference/inferences.md
new file mode 100644
index 000000000000..dcf70731d35a
--- /dev/null
+++ b/opencti-documentation/website/versioned_docs/version-1.1.1/reference/inferences.md
@@ -0,0 +1,86 @@
---
id: version-1.1.1-inferences
title: Inferred relations
sidebar_label: Inferred relations
original_id: inferences
---

## Introduction

OpenCTI is based on an [entities-relations model](../usage/model) that allows users to connect many entities together. In some cases, it can be useful for some facts to be automatically inferred from others. For instance, if a `campaign` has targeted the `electricity` sector, which is a sub-sector of the `energy` sector, and is attributed to an `intrusion set`, the analyst wants to know that this specific `intrusion set` has targeted the `energy` sector.

In OpenCTI, this can be represented by:

![Relations](assets/reference/relations.png "Relations")

To derive the implicit facts from this kind of knowledge, OpenCTI relies on the [inference capability of the Grakn database](https://dev.grakn.ai/docs/schema/rules). The inference is explained directly in the application when displaying an inferred relation:

![Inference 1](assets/reference/inference1.png "Inference 1")

## Implemented rules of inferences

The implemented rules are expressed here in pseudo-code.

### Usage rules

```
when {
    A attributed-to B
    A uses C
}, then {
    B uses C
}
```

### Target rules

```
when {
    A attributed-to B
    A targets C
}, then {
    B targets C
}
```

```
when {
    A targets B
    A uses C
}, then {
    C targets B
}
```

```
when {
    A part-of (gathering) B
    C targets A
}, then {
    C targets B
}
```

```
when {
    A localized-in (localization) B
    C targets A
}, then {
    C targets B
}
```

### Attribution rules

```
when {
    A attributed-to B
    B attributed-to C
}, then {
    A attributed-to C
}
```

### Localization rules

```
when {
    A localized-in (localization) B
    B localized-in (localization) C
}, then {
    A localized-in (localization) C
}
```
\ No newline at end of file
diff --git a/opencti-documentation/website/versions.json b/opencti-documentation/website/versions.json
index 3e34c5dace8f..ef18b160e461 100644
--- a/opencti-documentation/website/versions.json
+++ b/opencti-documentation/website/versions.json
@@ -1,4 +1,5 @@
 [
+  "1.1.1",
   "1.1.0",
   "1.0.2"
 ]