diff --git a/.github/workflows/action-updater.yml b/.github/workflows/action-updater.yml index e93f79166..ca2c6ba9d 100644 --- a/.github/workflows/action-updater.yml +++ b/.github/workflows/action-updater.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.1.4 with: # [Required] Access token with `workflow` scope. token: ${{ secrets.ACTION_UPDATER }} diff --git a/.github/workflows/adoc-html.yml b/.github/workflows/adoc-html.yml index 1c71b531d..72c4c80ec 100644 --- a/.github/workflows/adoc-html.yml +++ b/.github/workflows/adoc-html.yml @@ -9,8 +9,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@v4.1.4 + - uses: actions/setup-node@v4.0.2 with: node-version: 20 - name: Convert adoc diff --git a/.github/workflows/backport-5-0.yml b/.github/workflows/backport-5-0.yml index 3dffc7647..a895ad53e 100644 --- a/.github/workflows/backport-5-0.yml +++ b/.github/workflows/backport-5-0.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-1.yml b/.github/workflows/backport-5-1.yml index 5964dfc32..42515b977 100644 --- a/.github/workflows/backport-5-1.yml +++ b/.github/workflows/backport-5-1.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-2.yml b/.github/workflows/backport-5-2.yml index 47f5aed75..cb57cada4 100644 --- a/.github/workflows/backport-5-2.yml +++ b/.github/workflows/backport-5-2.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-3.yml b/.github/workflows/backport-5-3.yml index 8c281af0a..3a2db103e 100644 --- a/.github/workflows/backport-5-3.yml +++ b/.github/workflows/backport-5-3.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-4.yml b/.github/workflows/backport-5-4.yml index c6ff5061a..fdd62ebf9 100644 --- a/.github/workflows/backport-5-4.yml +++ b/.github/workflows/backport-5-4.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 2ad888348..a5ba1108c 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/forwardport.yml b/.github/workflows/forwardport.yml index 48f007ddc..d1e591d00 100644 --- a/.github/workflows/forwardport.yml +++ b/.github/workflows/forwardport.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.1.4 with: fetch-depth: 0 diff --git a/.github/workflows/to-plain-html.yml b/.github/workflows/to-plain-html.yml index 2320157b9..b5ba50676 100644 --- a/.github/workflows/to-plain-html.yml +++ b/.github/workflows/to-plain-html.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.1.4 with: token: ${{ secrets.TO_HTML }} - name: Asciidoc to html diff --git a/.github/workflows/validate.yml 
b/.github/workflows/validate.yml index 870597156..5c1f2a4c5 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -13,8 +13,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@v4.1.4 + - uses: actions/setup-node@v4.0.2 with: node-version: 20 - name: Check for broken internal links diff --git a/docs/antora.yml b/docs/antora.yml index 00e238734..2449db502 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -40,8 +40,8 @@ asciidoc: open-source-product-name: 'Community Edition' enterprise-product-name: 'Enterprise Edition' java-client-new: 'Java Client (Standalone)' - java-client: 'Java Client and Embedded Server' - url-cloud-signup: https://cloud.hazelcast.com/sign-up + java-client: 'Java Client and Embedded Server' + url-cloud-signup: https://cloud.hazelcast.com/sign-up hazelcast-cloud: Cloud ucn: User Code Namespaces ucd: User Code Deployment diff --git a/docs/modules/clients/pages/java.adoc b/docs/modules/clients/pages/java.adoc index f0707ab7a..ef803cd52 100644 --- a/docs/modules/clients/pages/java.adoc +++ b/docs/modules/clients/pages/java.adoc @@ -1,11 +1,10 @@ = Java Client :page-api-reference: https://docs.hazelcast.org/docs/{page-latest-supported-java-client}/javadoc :page-toclevels: 1 +:page-aliases: security:native-client-security.adoc :description: Hazelcast provides a {java-client} within the standard distribution you can start using right away, and also a lightweight {java-client-new} that is available in Beta. [[java-client]] -// check redirects - == Overview Hazelcast provides a {java-client} which you can use to connect to a Hazelcast cluster. `hazelcast-.jar` is bundled in the Hazelcast standard package, so just add `hazelcast-.jar` to your classpath and you can start using this client as if you are using the Hazelcast API. @@ -407,7 +406,7 @@ clientConfig.setClusterName("dev"); === Configure client security [blue]*Hazelcast {enterprise-product-name}* -You can define control mechanisms for clients to control authentication and authorisation. For more information, see xref:security:native-client-security.adoc[]. +You can define control mechanisms for clients to control authentication and authorisation. For more information, see xref:security:client-authorization.adoc[]. You can provide the Java client with an identity for cluster authentication. The identity of the connecting client is defined on the client side. Usually, there are no security realms on the clients; only the identity defined in the security configuration. @@ -1296,7 +1295,7 @@ You can configure the cluster routing mode to suit your requirements, as describ The following examples show the configuration for each cluster routing mode. NOTE: If your clients want to use temporary permissions defined in a member, see -xref:security:native-client-security.adoc#handling-permissions-when-a-new-member-joins[Handling Permissions]. +xref:security:client-authorization.adoc#handling-permissions-when-a-new-member-joins[Handling Permissions]. **Client ALL_MEMBERS routing** diff --git a/docs/modules/clients/pages/memcache.adoc b/docs/modules/clients/pages/memcache.adoc index 974afc0a8..b46c460e3 100644 --- a/docs/modules/clients/pages/memcache.adoc +++ b/docs/modules/clients/pages/memcache.adoc @@ -1,11 +1,12 @@ = Memcache Client +NOTE: Hazelcast Memcache Client only supports ASCII protocol. Binary Protocol is not supported. + A Memcache client written in any language can talk directly to a Hazelcast cluster. 
No additional configuration is required. -NOTE: Hazelcast Memcache Client only supports ASCII protocol. Binary Protocol is not supported. - -To be able to use a Memcache client, you must enable the Memcache client request listener service using either one of the following configuration options: +To be able to use a Memcache client, you must enable +the Memcache client request listener service using either one of the following configuration options: 1 - Using the `network` configuration element: diff --git a/docs/modules/cluster-performance/pages/performance-tips.adoc b/docs/modules/cluster-performance/pages/performance-tips.adoc index 8865c4f41..00659d26e 100644 --- a/docs/modules/cluster-performance/pages/performance-tips.adoc +++ b/docs/modules/cluster-performance/pages/performance-tips.adoc @@ -596,37 +596,26 @@ Here are the essential tips: * But it needs to be considered from the outset, as it affects architecture, performance and coding * Security can then be added before go-live without rework -TLS/SSL can have a significant impact on performance. There are a few ways to -increase the performance. - -The first thing that can be done is making sure that AES intrinsics are used. -Modern CPUs (2010 or newer Westmere) have hardware support for AES encryption/decryption -and the JIT automatically makes use of these AES intrinsics. They can also be -explicitly enabled using `-XX:+UseAES -XX:+UseAESIntrinsics`, -or disabled using `-XX:-UseAES -XX:-UseAESIntrinsics`. - -A lot of encryption algorithms make use of padding because they encrypt/decrypt in -fixed sized blocks. If there is no enough data -for a block, the algorithm relies on random number generation to pad. Under Linux, -the JVM automatically makes use of `/dev/random` for -the generation of random numbers. `/dev/random` relies on entropy to be able to +=== TLS Tuning + +You can improve TLS performance in a number of ways. + +Check if `securerandom.source` is configured to `/dev/urandom` in your +`/conf/security/java.security` file. +If it is set to `/dev/random` instead, operations that +require random data generation might block. +`/dev/random` relies on entropy to be able to generate random numbers. However, if this entropy is insufficient to keep up with the rate requiring random numbers, it can slow down the encryption/decryption since `/dev/random` will -block; it could block for minutes waiting for sufficient entropy . This can be fixed -by setting the `-Djava.security.egd=file:/dev/./urandom` system property. +block. This can be fixed +by setting the `-Djava.security.egd=file:/dev/urandom` system property. For a more permanent solution, modify the -`/jre/lib/security/java.security` file, look for the -`securerandom.source=/dev/urandom` and change it -to `securerandom.source=file:/dev/./urandom`. Switching to `/dev/urandom` could -be controversial because `/dev/urandom` will not -block if there is a shortage of entropy and the returned random values could -theoretically be vulnerable to a cryptographic attack. -If this is a concern in your application, use `/dev/random` instead. - -Hazelcast's Java smart client automatically makes use of extra I/O threads -for encryption/decryption and this have a significant impact on the performance. -This can be changed using the `hazelcast.client.io.input.thread.count` and +`/conf/security/java.security` file and change the `securerandom.source` property value directly.
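If you want to confirm which random source the JVM actually picked up after these changes, a quick diagnostic like the one below can be run with the same JVM settings as your members or clients. This is only an illustrative sketch (the class name and the 64-byte draw are arbitrary), not part of the Hazelcast documentation examples.

[source,java]
----
import java.security.SecureRandom;

public class SecureRandomCheck {
    public static void main(String[] args) {
        // The algorithm and provider reported here reflect the securerandom.source
        // and java.security.egd settings the JVM was started with.
        SecureRandom random = new SecureRandom();
        System.out.println("Algorithm: " + random.getAlgorithm());
        System.out.println("Provider:  " + random.getProvider());

        // Time the first request for random bytes; with an entropy-starved,
        // blocking source, this is where a delay would show up.
        long start = System.nanoTime();
        random.nextBytes(new byte[64]);
        System.out.println("First 64 random bytes took "
                + (System.nanoTime() - start) / 1_000_000 + " ms");
    }
}
----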
+ +Clients using Hazelcast's Java `ALL_MEMBERS` and `MULTI_MEMBER` cluster routing modes automatically make use of extra I/O threads +for encryption/decryption and this has a significant impact on the performance. +The number of threads used can be changed using the `hazelcast.client.io.input.thread.count` and `hazelcast.client.io.output.thread.count` client system properties. By default it is 1 input thread and 1 output thread. If TLS/SSL is enabled, it defaults to 3 input threads and 3 output threads. diff --git a/docs/modules/cluster-performance/pages/performance-tuning.adoc b/docs/modules/cluster-performance/pages/performance-tuning.adoc deleted file mode 100644 index bdc769dfc..000000000 --- a/docs/modules/cluster-performance/pages/performance-tuning.adoc +++ /dev/null @@ -1,511 +0,0 @@ -= Performance Tuning - -To achieve good performance in your Hazelcast deployment, it is crucial to tune your -production environment. This section provides guidelines for tuning the performance though we also -recommend to run performance and stress tests to evaluate the application performance. - -== Operating System Tuning - -=== Disabling Transparent Huge Pages (THP) - -Transparent Huge Pages (THP) is the Linux Memory Management -feature which aims to improve the application performance by -using the larger memory pages. In most of the cases it works fine -but for databases and in-memory data grids it usually causes a significant performance drop. -Since it's enabled on most of the Linux distributions, we do recommend disabling -it when you run Hazelcast. - -Use the following command to check if it's enabled: - -``` -cat /sys/kernel/mm/transparent_hugepage/enabled -cat /sys/kernel/mm/transparent_hugepage/defrag - -``` - -Or an alternative command if you run RHEL: - -``` -cat /sys/kernel/mm/redhat_transparent_hugepage/enabled -cat /sys/kernel/mm/redhat_transparent_hugepage/defrag -``` - -To disable it permanently, please see the corresponding documentation -for the Linux distribution that you use. Here is an example of the instructions -for RHEL: https://access.redhat.com/solutions/46111. - -=== Disabling Swap Usage - -Swapping behavior can be configured by setting the kernel parameter -(`/proc/sys/vm/swappiness`) and can be turned off completely by executing -`swapoff -a` as the root user in Linux systems. We highly recommend turning -off the swapping on the machines that run Hazelcast. When your operating systems -start swapping, garbage collection activities take much longer due to the low speed of disc access. - -The Linux kernel parameter, `vm.swappiness`, is a value from 0-100 that controls -the swapping of application data from physical memory to virtual memory on disk. -To prevent Linux kernel to start swapping memory to disk way too early, -we need to set the default of 60 to value between 0 and 10. -The higher the parameter value, the more aggressively inactive processes are -swapped out from physical memory. The lower the value, the less they are swapped, -forcing filesystem buffers to be emptied. In case swapping needs to be kept enabled, -we recommend setting the value between 0 and 10 to prevent the Linux kernel -to start swapping memory to disk way too early. - -``` -sudo sysctl vm.swappiness=10 -``` - -== Network Tuning - -=== Dedicated Network Interface Controller for Hazelcast Members - -Provisioning a dedicated physical network interface controller (NIC) for -Hazelcast members ensures smooth flow of data, including business -data and cluster health checks, across servers. 
Sharing network interfaces -between a Hazelcast member and another application could result in choking the port, -thus causing unpredictable cluster behavior. - -=== TCP Buffer Size - -TCP uses a congestion window to determine how many packets it -can send at one time; the larger the congestion window, the higher the throughput. -The maximum congestion window is related to the amount of buffer -space that the kernel allocates for each socket. For each socket, -there is a default value for the buffer size, which you can change by using -a system library call just before opening the socket. You can adjust -the buffer sizes for both the receiving and sending sides of a socket. - -To achieve maximum throughput, it is critical to use the optimal TCP -socket buffer sizes for the links you are using to transmit data. -If the buffers are too small, the TCP congestion window will never open up fully, -therefore throttling the sender. If the buffers are too large, -the sender can overrun the receiver such that the sending host is -faster than the receiving host, which causes the receiver to drop packets -and the TCP congestion window to shut down. - -Typically, you can determine the throughput by the following formulae: - -* Transaction per second = buffer size / latency -* Buffer size = Round trip time * network bandwidth - -Hazelcast, by default, configures I/O buffers to 128KB; you can change these -using the following Hazelcast properties: - -* `hazelcast.socket.receive.buffer.size` -* `hazelcast.socket.send.buffer.size` - -The operating system has separate configuration for minimum, default and maximum socket buffer sizes, so it is not guaranteed that the socket buffers allocated to Hazelcast sockets will match the requested buffer size. - -On Linux, the following kernel parameters can be used to configure socket buffer sizes: - -* `net.core.rmem_max`: maximum socket receive buffer size in bytes -* `net.core.wmem_max`: maximum socket send buffer size in bytes -* `net.ipv4.tcp_rmem`: minimum, default and maximum receive buffer size per TCP socket -* `net.ipv4.tcp_wmem`: minimum, default and maximum send buffer size per TCP socket - -To make a temporary change to one of these values, use `sysctl`: -``` -$ sysctl net.core.rmem_max=2097152 -$ sysctl net.ipv4.tcp_rmem="8192 131072 6291456" -``` - -To apply changes permanently, edit file `/etc/sysctl.conf` e.g.: - -``` -$ vi /etc/sysctl.conf -net.core.rmem_max = 2097152 -net.ipv4.tcp_rmem = 8192 131072 6291456 -``` - -Check your Linux distribution's documentation for more information about configuring kernel parameters. - -== Virtual Machine Tuning - -=== Garbage Collection - -Keeping track of garbage collection (GC) statistics is vital to optimum performance, -especially if you run the JVM with large heap sizes. Tuning the garbage collector -for your use case is often a critical performance practice prior to deployment. -Likewise, knowing what baseline GC behavior looks like and -monitoring for behavior outside normal tolerances will keep you aware of -potential memory leaks and other pathological memory usage. Hazelcast provides a basic -GC recommendation in our xref:ROOT:production-checklist.adoc#jvm-recommendations[JVM Recommendations]. - -=== Minimize Heap Usage - -The best way to minimize the performance impact of GC -is to keep heap usage small. Maintaining a small heap saves countless -hours of GC tuning and provides improved stability -and predictability across your entire application. 
-Even if your application uses very large amounts of data, you can still keep -your heap small by using Hazelcast's High-Density Memory Store. - -=== Enable GC Logging - -We xref:ROOT:production-checklist.adoc#jvm-recommendations[recommend] enabling -GC logs to allow troubleshooting if performance problems occur. To enable GC -logging, use the following JVM arguments: - -``` --Xlog:gc=debug:file=/tmp/gc.log:time,uptime,level,tags:filesize=100m,filecount=10 -``` - -=== Azul Zing® and Zulu® Support - -Azul Systems, the industry’s only company exclusively focused on -Java and the Java Virtual Machine (JVM), builds fully supported, -certified standards-compliant Java runtime solutions that help -enabling real-time business. Zing is a JVM designed for enterprise -Java applications and workloads that require any combination of low -latency, high transaction rates, large working memory, and/or consistent -response times. Zulu and Zulu Enterprise are Azul’s certified, freely available -open source builds of OpenJDK with a variety of flexible support options, -available in configurations for the enterprise as well as custom and embedded systems. -Azul Zing is certified and supported in Hazelcast {enterprise-product-name}. When deployed with Zing, -Hazelcast gains performance, capacity, and operational efficiency within the same infrastructure. -Additionally, you can directly use Hazelcast with Zulu without making any changes to your code. - -== Query Tuning - -=== Indexes for Queried Fields - -For queries on fields with ranges, you can use an ordered index. -Hazelcast, by default, caches the deserialized form of the object under -query in the memory when inserted into an index. This removes the overhead -of object deserialization per query, at the cost of increased heap usage. -See the xref:query:indexing-maps.adoc#indexing-ranged-queries[Indexing Ranged Queries section]. - -=== Composite Indexes - -Composite indexes are built on top of multiple map entry -attributes; thus, increase the performance of complex queries significantly -when used correctly. See the xref:query:indexing-maps.adoc#composite-indexes[Composite Indexes section] - -=== Parallel Query Evaluation & Query Thread Pool - -Setting the `hazelcast.query.predicate.parallel.evaluation` property -to `true` can speed up queries when using slow predicates or when there are huge -amount of entries per member. - -If you're using queries heavily, you can benefit from increasing query thread pools. -See the xref:query:querying-maps-predicates.adoc#configuring-the-query-thread-pool[Configuring the Query Thread Pool section]. - -=== In-Memory Format for Queries - -Setting the queried entries' in-memory format to `OBJECT` forces the objects -to be always kept in object format, resulting in faster access for queries, but also in -higher heap usage. It will also incur an object serialization step on every remote get operation. See the xref:data-structures:setting-data-format.adoc[Setting In-Memory Format section]. - -=== Portable Interface on Queried Objects - -The Portable interface allows individual fields to be accessed without -the overhead of deserialization or reflection and supports query and -indexing support without full-object deserialization. -See the related https://hazelcast.com/blog/for-faster-hazelcast-queries/[Hazelcast Blog] and the xref:serialization:implementing-portable-serialization.adoc[Portable Serialization section]. 
- -== Serialization Tuning - -Hazelcast supports a range of object serialization mechanisms, -each with their own costs and benefits. Choosing the best serialization -scheme for your data and access patterns can greatly increase the performance -of your cluster. - -For an overview of serialization options with comparative advantages and disadvantages, see xref:serialization:serialization.adoc[]. - -[[serialization-opt-recommendations]] -=== Serialization Optimization Recommendations - -* Use `IMap.set()` on maps instead of `IMap.put()` if you don’t -need the old value. This eliminates unnecessary deserialization of the old value. -* Set `use-native-byte-order` and `allow-unsafe` to `true` in Hazelcast's serialization configuration. -Setting these properties to `true` enables fast copy of primitive -arrays like `byte[]`, `long[]`, etc., in your object. -* Compression is supported only by `Serializable` and -`Externalizable`. It has not been applied to other serializable methods -because it is much slower (around three orders of magnitude slower than -not using compression) and consumes a lot of CPU. However, it can -reduce binary object size by an order of magnitude. -* When `enable-shared-object` is set to `true`, the Java serializer will -back-reference an object pointing to a previously serialized instance. -If set to `false`, every instance is considered unique and copied separately -even if they point to the same instance. The default configuration is false. - -See also the xref:serialization:serialization-configuration.adoc[Serialization Configuration Wrap-Up section] for details. - -[[exec-svc-opt]] -== Compute Tuning - -Hazelcast executor service is an extension of Java’s built-in executor service -that allows distributed execution and control of tasks. There are a number of -options for Hazelcast executor service that have an impact on performance as summarized below. - -=== Number of Threads - -An executor queue may be configured to have a specific number of -threads dedicated to executing enqueued tasks. Set the number of -threads (`pool-size` property in the executor service configuration) -appropriate to the number of cores available for execution. -Too few threads will reduce parallelism, leaving cores idle, while too -many threads will cause context switching overhead. -See the xref:computing:executor-service.adoc#configuring-executor-service[Configuring Executor Service section]. - -=== Bounded Execution Queue - -An executor queue may be configured to have a maximum number -of tasks (`queue-capacity` property in the executor service configuration). -Setting a bound on the number of enqueued tasks -will put explicit back pressure on enqueuing clients by throwing -an exception when the queue is full. This will avoid the overhead -of enqueuing a task only for it to be canceled because its execution -takes too long. It will also allow enqueuing clients to take corrective -action rather than blindly filling up work queues with tasks faster than they can be executed. -See the xref:computing:executor-service.adoc#configuring-executor-service[Configuring Executor Service section]. - -=== Avoid Blocking Operations in Tasks - -Any time spent blocking or waiting in a running task is thread -execution time wasted while other tasks wait in the queue. -Tasks should be written such that they perform no potentially -blocking operations (e.g., network or disk I/O) in their `run()` or `call()` methods. - -=== Locality of Reference - -By default, tasks may be executed on any member. 
Ideally, however, -tasks should be executed on the same machine that contains -the data the task requires to avoid the overhead of moving remote data to -the local execution context. Hazelcast executor service provides a number of -mechanisms for optimizing locality of reference. - -* Send tasks to a specific member: using `ExecutorService.executeOnMember()`, -you may direct execution of a task to a particular member -* Send tasks to a key owner: if you know a task needs to operate on a -particular map key, you may direct execution of that task to the member -that owns that key -* Send tasks to all or a subset of members: if, for example, you need to operate -on all the keys in a map, you may send tasks to all members such that each task -operates on the local subset of keys, then return the local result for -further processing - -=== Scaling Executor Services - -If you find that your work queues consistently reach their maximum -and you have already optimized the number of threads and locality -of reference, and removed any unnecessary blocking operations in your tasks, -you may first try to scale up the hardware of the overburdened members -by adding cores and, if necessary, more memory. - -When you have reached diminishing returns on scaling up -(such that the cost of upgrading a machine outweighs the benefits of the upgrade), -you can scale out by adding more members to your cluster. -The distributed nature of Hazelcast is perfectly suited to scaling out, -and you may find in many cases that it is as easy as just configuring and -deploying additional virtual or physical hardware. - -=== Executor Services Guarantees - -In addition to the regular distributed executor service, -Hazelcast also offers durable and scheduled executor services. -Note that when a member failure occurs, durable and scheduled executor -services come with "at least once execution of a task" guarantee, -while the regular distributed executor service has none. -See the xref:computing:durable-executor-service.adoc[Durable] and xref:computing:scheduled-executor-service.adoc[Scheduled] executor services. - -=== Work Queue Is Not Partitioned - -Each member-specific executor will have its own private work-queue. -Once a job is placed on that queue, it will not be taken by another member. -This may lead to a condition where one member has a lot of unprocessed -work while another is idle. This could be the result of an application -call such as the following: - -``` -for(;;){ - iexecutorservice.submitToMember(mytask, member) -} -``` - -This could also be the result of an imbalance caused by the application, -such as in the following scenario: all products by a particular manufacturer -are kept in one partition. When a new, very popular product gets released -by that manufacturer, the resulting load puts a huge pressure on that -single partition while others remain idle. - -=== Work Queue Has Unbounded Capacity by Default - -This can lead to `OutOfMemoryError` because the number of queued tasks -can grow without bounds. This can be solved by setting the `queue-capacity` property -in the executor service configuration. If a new task is submitted while the queue -is full, the call will not block, but will immediately throw a -`RejectedExecutionException` that the application must handle. - -=== No Load Balancing - -There is currently no load balancing available for tasks that can run -on any member. If load balancing is needed, it may be done by creating an -executor service proxy that wraps the one returned by Hazelcast. 
-Using the members from the `ClusterService` or member information from -`SPI:MembershipAwareService`, it could route "free" tasks to a specific member based on load. - -=== Destroying Executors - -An executor service must be shut down with care because it will -shut down all corresponding executors in every member and subsequent -calls to proxy will result in a `RejectedExecutionException`. -When the executor is destroyed and later a `HazelcastInstance.getExecutorService` -is done with the ID of the destroyed executor, a new executor will be created -as if the old one never existed. - -=== Exceptions in Executors - -When a task fails with an exception (or an error), this exception -will not be logged by Hazelcast by default. This comports with the -behavior of Java’s thread pool executor service, but it can make debugging difficult. -There are, however, some easy remedies: either add a try/catch in your runnable and -log the exception, or wrap the runnable/callable in a proxy that does the logging; -the last option keeps your code a bit cleaner. - -[[client-exec-pool-size]] -=== Client Executor Pool Size - -Hazelcast clients use an internal executor service -(different from the distributed executor service) to perform some of -its internal operations. By default, the thread pool for that executor service -is configured to be the number of cores on the client machine times five; e.g., on a 4-core -client machine, the internal executor service will have 20 threads. -In some cases, increasing that thread pool size may increase performance. - -[[ep]] -=== Entry Processors Performance Tuning - -Hazelcast allows you to update the whole or a -part of map or cache entries in an efficient and a lock-free way using -entry processors. - -By default the entry processor executes on a partition thread. A partition thread is responsible for handling -one or more partitions. The design of entry processor assumes users have fast user code execution of the `process()` method. -In the pathological case where the code is very heavy and executes in multi-milliseconds, this may create a bottleneck. - -We have a slow user code detector which can be used to log a warning -controlled by the following system properties: - -* `hazelcast.slow.operation.detector.enabled` (default: true) -* `hazelcast.slow.operation.detector.threshold.millis` (default: 10000) - -include::clusters:partial$ucn-migrate-tip.adoc[] - -The defaults catch extremely slow operations but you should set this -much lower, say to 1ms, at development time to catch entry processors -that could be problematic in production. These are good candidates for our optimizations. - -We have two optimizations: - -* `Offloadable` which moves execution off the partition thread to an executor thread -* `ReadOnly` which means we can avoid taking a lock on the key - -These are enabled very simply by implementing these interfaces in your entry processor. -These optimizations apply to the following map methods only: - -* `executeOnKey(Object, EntryProcessor)` -* `submitToKey(Object, EntryProcessor)` -* `submitToKey(Object, EntryProcessor, ExecutionCallback)` - -See the xref:data-structures:entry-processor.adoc[Entry Processors section]. - -[[tls-ssl-perf]] -== TLS/SSL Tuning - -TLS/SSL can have a significant impact on performance. There are a few ways to -increase the performance. - -The first thing that can be done is making sure that AES intrinsics are used. 
-Modern CPUs (2010 or newer Westmere) have hardware support for AES encryption/decryption -and the JIT automatically makes use of these AES intrinsics. They can also be -explicitly enabled using `-XX:+UseAES -XX:+UseAESIntrinsics`, -or disabled using `-XX:-UseAES -XX:-UseAESIntrinsics`. - -A lot of encryption algorithms make use of padding because they encrypt/decrypt in -fixed sized blocks. If there is no enough data -for a block, the algorithm relies on random number generation to pad. Under Linux, -the JVM automatically makes use of `/dev/random` for -the generation of random numbers. `/dev/random` relies on entropy to be able to -generate random numbers. However, if this entropy is -insufficient to keep up with the rate requiring random numbers, it can slow down -the encryption/decryption since `/dev/random` will -block; it could block for minutes waiting for sufficient entropy . This can be fixed -by setting the `-Djava.security.egd=file:/dev/./urandom` system property. -For a more permanent solution, modify the -`/jre/lib/security/java.security` file, look for the -`securerandom.source=/dev/urandom` and change it -to `securerandom.source=file:/dev/./urandom`. Switching to `/dev/urandom` could -be controversial because `/dev/urandom` will not -block if there is a shortage of entropy and the returned random values could -theoretically be vulnerable to a cryptographic attack. -If this is a concern in your application, use `/dev/random` instead. - -Clients using Hazelcast's Java `ALL_MEMBERS` and `MULTI_MEMBER` cluster routing modes automatically make use of extra I/O threads -for encryption/decryption and this has a significant impact on the performance. -Thee number of threads used can be changed using the `hazelcast.client.io.input.thread.count` and -`hazelcast.client.io.output.thread.count` client system properties. -By default it is 1 input thread and 1 output thread. If TLS/SSL is enabled, -it defaults to 3 input threads and 3 output threads. -Having more client I/O threads than members in the cluster does not lead to -an increased performance. So with a 2-member cluster, -2 in and 2 out threads give the best performance. - -[[hd]] -== High-Density Memory Store - -Hazelcast's High-Density Memory Store (HDMS) is an in-memory storage -option that uses native, off-heap memory to store object data -instead of the JVM heap. This allows you to keep data in the memory without -incurring the overhead of garbage collection (GC). HDMS capabilities are supported by -the map structure, JCache implementation, Near Cache, Hibernate caching, and Web Session replications. - -Available to Hazelcast {enterprise-product-name} customers, HDMS is an ideal solution -for those who want the performance of in-memory data, need the predictability -of well-behaved Java memory management, and don’t want to spend time -and effort on meticulous and fragile GC tuning. - -If you use HDMS with large data sizes, -we recommend a large increase in partition count, starting with 5009 or higher. See the -<> above for more information. Also, if you intend -to preload very large amounts of data into memory (tens, hundreds, or thousands of gigabytes), -be sure to profile the data load time and to take that startup time into account prior to deployment. - -See the xref:storage:high-density-memory.adoc[HDMS section] to learn more. 
- -[[many-members]] -== Clusters with Huge Amount of Members/Clients - -Very large clusters of hundreds of members are possible with Hazelcast, -but stability depends heavily on your network infrastructure and -ability to monitor and manage those many members. Distributed executions -in such an environment will be more sensitive to your application's -handling of execution errors, timeouts, and the optimization of task code. - -In general, you get better results with smaller clusters of Hazelcast members -running on more powerful hardware and a higher number of Hazelcast clients. -When running large numbers of clients, network stability is still a significant factor -in overall stability. If you are running in Amazon EC2, hosting clients -and members in the same zone is beneficial. Using Near Cache on read-mostly -data sets reduces server load and network overhead. You may also try increasing -the number of threads in the client executor pool. - -[[int-response-queue]] -== Setting Internal Response Queue Idle Strategies - -You can set the response thread for internal operations both on the members and clients. -By setting the backoff mode on and depending on the use case, you can get a -5-10% performance improvement. However, this increases the CPU utilization. -To enable backoff mode please set the following property for Hazelcast cluster members: - -``` --Dhazelcast.operation.responsequeue.idlestrategy=backoff -``` - -For Hazelcast clients, please use the following property to enable backoff: - -``` --Dhazelcast.client.responsequeue.idlestrategy=backoff -``` diff --git a/docs/modules/clusters/pages/deploying-code-from-clients.adoc b/docs/modules/clusters/pages/deploying-code-from-clients.adoc index 8343e5472..44bb5a84b 100644 --- a/docs/modules/clusters/pages/deploying-code-from-clients.adoc +++ b/docs/modules/clusters/pages/deploying-code-from-clients.adoc @@ -15,7 +15,7 @@ to the members when connecting. This way, when a client adds a new class, the members do not require a restart to include it in their classpath. You can also use the client permission policy to specify which clients -are permitted to use User Code Deployment. See the xref:security:native-client-security.adoc#permissions[Permissions section]. +are permitted to use User Code Deployment. See the xref:security:client-authorization.adoc#permissions[Permissions section]. [[configuring-client-user-code-deployment]] == Configuring Client User Code Deployment diff --git a/docs/modules/clusters/pages/ucn-security.adoc b/docs/modules/clusters/pages/ucn-security.adoc index 34bd756a8..064f26a0f 100644 --- a/docs/modules/clusters/pages/ucn-security.adoc +++ b/docs/modules/clusters/pages/ucn-security.adoc @@ -9,4 +9,4 @@ Permissions are set using the `UserCodeNamespacePermission` class, which extends For further information on the `UserCodeNamespacePermission` class, refer to https://docs.hazelcast.org/docs/{full-version}/javadoc/com/hazelcast/security/permission/UserCodeNamespacePermission.html[Class UserCodeNamespacePermission^] in the Java API documentation. -For further information on client permissions with {ucn}, see the xref:security:native-client-security.adoc[] topic. \ No newline at end of file +For further information on client permissions with {ucn}, see the xref:security:client-authorization.adoc[] topic. 
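Returning to the client user code deployment configuration covered in `deploying-code-from-clients.adoc` above: the following is a minimal client-side sketch of that (now deprecated) feature, shown purely as an illustration. It assumes user code deployment is also enabled on the members, and the entry processor class name is hypothetical; in a secured cluster the client may additionally need the permissions described in `client-authorization.adoc`.

[source,java]
----
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;

public class ClientUserCodeDeploymentExample {
    public static void main(String[] args) {
        ClientConfig clientConfig = new ClientConfig();

        // Ship the listed class to the members when the client connects,
        // so the members do not need a restart to add it to their classpath.
        clientConfig.getUserCodeDeploymentConfig()
                .setEnabled(true)
                .addClass("com.example.IncrementingEntryProcessor"); // hypothetical class

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
        // ... use maps, entry processors, or listeners that rely on the deployed class
        client.shutdown();
    }
}
----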
diff --git a/docs/modules/clusters/pages/ucn-static-config.adoc b/docs/modules/clusters/pages/ucn-static-config.adoc index 8bfb9a868..58e642003 100644 --- a/docs/modules/clusters/pages/ucn-static-config.adoc +++ b/docs/modules/clusters/pages/ucn-static-config.adoc @@ -14,7 +14,7 @@ To statically configure {ucn} for your user code, you must do the following: You can also configure a `default` namespace, which can be used to provide resources when no specific {ucn} have been configured. For example, a data structure without an associated namespace, a partition loss listener, or when you are using an API that is not namespace-aware. For further information on using the `default` namespace, see the xref:clusters:ucn-non-associated.adoc[] topic. -If you want to instantiate and register a customization that looks up code in a namespace, you must also configure the permissions. For further information on permissions, see the xref:clusters:ucn-security.adoc[] and xref:security:native-client-security.adoc[] topics. +If you want to instantiate and register a customization that looks up code in a namespace, you must also configure the permissions. For further information on permissions, see the xref:clusters:ucn-security.adoc[] and xref:security:client-authorization.adoc[] topics. == In the Hazelcast Configuration File @@ -25,4 +25,4 @@ To use the _hazelcast_ configuration file to configure {ucn}, see the following == Programatically -To configure {ucn} programatically, see the xref:clusters:ucn-static-programmatic.adoc[] topic. \ No newline at end of file +To configure {ucn} programatically, see the xref:clusters:ucn-static-programmatic.adoc[] topic. diff --git a/docs/modules/clusters/partials/ucn-migrate-tip.adoc b/docs/modules/clusters/partials/ucn-migrate-tip.adoc index 3b56e1023..88b7e6ec7 100644 --- a/docs/modules/clusters/partials/ucn-migrate-tip.adoc +++ b/docs/modules/clusters/partials/ucn-migrate-tip.adoc @@ -1 +1 @@ -CAUTION: {ucd} has been deprecated and will be removed in the next major version. To continue deploying your user code after this time, {open-source-product-name} users can either upgrade to {enterprise-product-name}, or add their resources to the Hazelcast member class paths. Hazelcast recommends that {enterprise-product-name} users migrate their user code to use {ucn} for all purposes other than Jet stream processing. For further information on migrating from {ucd} to {ucn}, see xref:clusters:ucn-migrate-ucd.adoc[]. \ No newline at end of file +CAUTION: {ucd} has been deprecated and will be removed in the next major version. To continue deploying your user code after this time, {open-source-product-name} users can either upgrade to {enterprise-product-name}, or add their resources to the Hazelcast member class paths. Hazelcast recommends that {enterprise-product-name} users migrate their user code to use {ucn}. For further information on migrating from {ucd} to {ucn}, see the xref:clusters:ucn-migrate-ucd.adoc[] topic. 
\ No newline at end of file diff --git a/docs/modules/configuration/pages/dynamic-config.adoc b/docs/modules/configuration/pages/dynamic-config.adoc index 749efbfc0..545d7aaf6 100644 --- a/docs/modules/configuration/pages/dynamic-config.adoc +++ b/docs/modules/configuration/pages/dynamic-config.adoc @@ -147,4 +147,4 @@ to be sent over the network to all cluster members, and their classes have to be == Limitations -Although you can configure members to xref:security:native-client-security.adoc#handling-permissions-when-a-new-member-joins[apply the client permissions of a new member], you can't use dynamic configuration to make changes to client permissions. +Although you can configure members to xref:security:client-authorization.adoc#handling-permissions-when-a-new-member-joins[apply the client permissions of a new member], you can't use dynamic configuration to make changes to client permissions. diff --git a/docs/modules/data-structures/pages/map-config.adoc b/docs/modules/data-structures/pages/map-config.adoc index 4b24f5c87..bee7e009c 100644 --- a/docs/modules/data-structures/pages/map-config.adoc +++ b/docs/modules/data-structures/pages/map-config.adoc @@ -4,7 +4,6 @@ {description} -[[map-configuration-defaults]] == Hazelcast Map Configuration Defaults The `hazelcast.xml`/`hazelcast.yaml` configuration included with your Hazelcast distribution includes the following default settings for maps. @@ -34,22 +33,9 @@ For details on map backups, refer to xref:backing-up-maps.adoc[]. For details on in-memory format, refer to xref:setting-data-format.adoc[]. -== The Default (Fallback) Map Configuration -When a map is created, if the map name matches an entry in the `hazelcast.xml`/`hazelcast.yaml` file, the values in the matching entry are used to overwrite the initial values -discussed in the <> section. +== Modifying the Default Configuration -Maps that do not have any configuration defined use the default configuration. If you want to set a configuration that is valid for all maps, you can name your configuration as `default`. A user-defined default configuration applies to every map that does not have a specific custom map configuration defined with the map’s name. You can also use wildcards to associate your configuration with multiple map names. See the [configuration documentation](https://docs.hazelcast.com/hazelcast/5.5/configuration/using-wildcards) for more information about wildcards. - -When a map name does not match any entry in the `hazelcast.xml`/`hazelcast.yaml` file then: - -- If the `default` map configuration exists, the values under this entry are used to overwrite initial values. Therefore, `default` serves as a fallback. - -- If a `default` map configuration does not exist, the map is created with initial values as discussed in <>. - - -== Modifying the Default (Fallback) Configuration - -In the following example, we set expiration timers for dynamically created maps that lack a named configuration block. Map entries that are idle for an hour will be marked as eligible for removal if the cluster begins to run out of memory. Any map entry older than six hours will be marked as eligible for removal. +You can create a default configuration for all maps for your environment by modifying the map configuration block named "default" in your `hazelcast.xml`/`hazelcast.yaml` file. In the following example, we set expiration timers for map entries. Map entries that are idle for an hour will be marked as eligible for removal if the cluster begins to run out of memory. 
Any map entry older than six hours will be marked as eligible for removal. For more on entry expiration, go to xref:managing-map-memory.adoc[Managing Map Memory]. diff --git a/docs/modules/integrate/pages/elasticsearch-connector.adoc b/docs/modules/integrate/pages/elasticsearch-connector.adoc index 291a423cf..c82a5bf88 100644 --- a/docs/modules/integrate/pages/elasticsearch-connector.adoc +++ b/docs/modules/integrate/pages/elasticsearch-connector.adoc @@ -14,7 +14,7 @@ Each module includes an Elasticsearch client that's compatible with the given ma == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. == Elasticsearch as a Source diff --git a/docs/modules/integrate/pages/feature-engineering-with-feast.adoc b/docs/modules/integrate/pages/feature-engineering-with-feast.adoc index 11681984a..20caff7d1 100644 --- a/docs/modules/integrate/pages/feature-engineering-with-feast.adoc +++ b/docs/modules/integrate/pages/feature-engineering-with-feast.adoc @@ -16,7 +16,7 @@ image:ROOT:feast_batch.png[Feast batch wokflow] You will need the following ready before starting the tutorial: -* Hazelcast CLC (see link:https://docs.hazelcast.com/clc/latest/install-clc[Install CLC]) +* Hazelcast CLC. link:https://docs.hazelcast.com/clc/latest/install-clc[Installation instructions] * A recent version of Docker and Docker Compose To set up your project, complete the following steps: diff --git a/docs/modules/integrate/pages/file-connector.adoc b/docs/modules/integrate/pages/file-connector.adoc index 3d710dac3..bb0728e39 100644 --- a/docs/modules/integrate/pages/file-connector.adoc +++ b/docs/modules/integrate/pages/file-connector.adoc @@ -18,7 +18,7 @@ Depending on the <>, you may also == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to your files. For details, see xref:pipelines:job-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to your files. For details, see xref:pipelines:job-security.adoc[]. == Supported File Systems diff --git a/docs/modules/integrate/pages/integrate-with-feast.adoc b/docs/modules/integrate/pages/integrate-with-feast.adoc index 79c2c8d38..6dfe43837 100644 --- a/docs/modules/integrate/pages/integrate-with-feast.adoc +++ b/docs/modules/integrate/pages/integrate-with-feast.adoc @@ -10,7 +10,7 @@ This approach unlocks the following to power your Feast real-world machine learn Feast creates a new IMap for each feature, which means that every feature view corresponds to an IMap in the Hazelcast cluster, and the entries in that IMap correspond to features of entitites. Each feature value is stored separately, and can be retrieved individually. * Hazelcast's inherent strengths, such as high availability, fault tolerance, and data distribution -* Support for a secure TLS/SSL connection to your Hazelcast online store +* Support for a secure TLS connection to your Hazelcast online store * The ability to set Time-to-Live (TTL) for features in your Hazelcast cluster == What is Feast? 
@@ -114,5 +114,5 @@ To use Feast with Hazelcast, you must do the following: You can also work through the following tutorials: -* xref:integrate:feature-engineering-with-feast.adoc[Get started with Feast streaming] -* xref:integrate:streaming-features-with-feast.adoc[Get started with Feast feature engineering] +* Get Started with Feature Store +* Feature Compute and Transformation diff --git a/docs/modules/integrate/pages/jcache-connector.adoc b/docs/modules/integrate/pages/jcache-connector.adoc index 03862bfb9..5d323a8ff 100644 --- a/docs/modules/integrate/pages/jcache-connector.adoc +++ b/docs/modules/integrate/pages/jcache-connector.adoc @@ -11,4 +11,4 @@ distributions of Hazelcast. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For details, see xref:security:native-client-security.adoc[]. \ No newline at end of file +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For details, see xref:security:client-authorization.adoc[]. diff --git a/docs/modules/integrate/pages/kafka-connect-connectors.adoc b/docs/modules/integrate/pages/kafka-connect-connectors.adoc index 89f260230..1c2f99ae6 100644 --- a/docs/modules/integrate/pages/kafka-connect-connectors.adoc +++ b/docs/modules/integrate/pages/kafka-connect-connectors.adoc @@ -49,7 +49,7 @@ Every Kafka Connect Source connector comes with documentation that includes the == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, your clients may need updated permissions to upload the ZIP or JAR file used by the Kafka Connect Source Connector. For details, see xref:pipelines:job-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, your clients may need updated permissions to upload the ZIP or JAR file used by the Kafka Connect Source Connector. For details, see xref:pipelines:job-security.adoc[]. == Adding the Connector Configuration diff --git a/docs/modules/integrate/pages/kafka-connector.adoc b/docs/modules/integrate/pages/kafka-connector.adoc index 2ea9553d5..e4e102256 100644 --- a/docs/modules/integrate/pages/kafka-connector.adoc +++ b/docs/modules/integrate/pages/kafka-connector.adoc @@ -17,7 +17,7 @@ If you're using the slim distribution, you must add the link:https://mvnreposito == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. == Configuration Options diff --git a/docs/modules/integrate/pages/legacy-file-connector.adoc b/docs/modules/integrate/pages/legacy-file-connector.adoc index e4f81fb9c..d3d4f6172 100644 --- a/docs/modules/integrate/pages/legacy-file-connector.adoc +++ b/docs/modules/integrate/pages/legacy-file-connector.adoc @@ -22,7 +22,7 @@ To access Hadoop or any of the cloud-based file systems, add one of the download == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to your files. For details, see xref:pipelines:job-security.adoc[]. 
+If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to your files. For details, see xref:pipelines:job-security.adoc[]. == Supported File Systems diff --git a/docs/modules/integrate/pages/list-connector.adoc b/docs/modules/integrate/pages/list-connector.adoc index ae797c976..a380d6949 100644 --- a/docs/modules/integrate/pages/list-connector.adoc +++ b/docs/modules/integrate/pages/list-connector.adoc @@ -11,7 +11,7 @@ This connector is included in the full and slim distributions of Hazelcast. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For details, see xref:security:native-client-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For details, see xref:security:client-authorization.adoc[]. == List as a Source or Sink @@ -30,4 +30,4 @@ p.readFrom(Sources.list(inputList)) ``` NOTE: List isn't suitable to use as a streaming sink because items are always -appended and eventually the member will run out of memory. \ No newline at end of file +appended and eventually the member will run out of memory. diff --git a/docs/modules/integrate/pages/map-connector.adoc b/docs/modules/integrate/pages/map-connector.adoc index fcb21722e..e75eab758 100644 --- a/docs/modules/integrate/pages/map-connector.adoc +++ b/docs/modules/integrate/pages/map-connector.adoc @@ -11,11 +11,11 @@ distributions of Hazelcast. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For example, to read from map sources, you must add the `create` and `read` permissions for those maps. If you use the map connector to write to map sinks, you must add the `create` and `put` permissions for those maps. -For details, see xref:security:native-client-security.adoc[]. +For details, see xref:security:client-authorization.adoc[]. == Map as a Batch Source @@ -204,4 +204,4 @@ p.readFrom(Sources.map(personCache, Predicates.greaterEqual("age", 21), Projections.singleAttribute("name")) ); -``` \ No newline at end of file +``` diff --git a/docs/modules/integrate/pages/reliable-topic-connector.adoc b/docs/modules/integrate/pages/reliable-topic-connector.adoc index ec4d53306..916fa4d40 100644 --- a/docs/modules/integrate/pages/reliable-topic-connector.adoc +++ b/docs/modules/integrate/pages/reliable-topic-connector.adoc @@ -5,12 +5,13 @@ used as a data sink within a pipeline. == Installing the Connector -This connector is included in the full and slim distributions of Hazelcast. +This connector is included in the full and slim +distributions of Hazelcast. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For details, see xref:security:native-client-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. For details, see xref:security:client-authorization.adoc[].
== Reliable Topic as a Source or Sink diff --git a/docs/modules/integrate/pages/streaming-features-with-feast.adoc b/docs/modules/integrate/pages/streaming-features-with-feast.adoc index a6fa4f333..8b33311a4 100644 --- a/docs/modules/integrate/pages/streaming-features-with-feast.adoc +++ b/docs/modules/integrate/pages/streaming-features-with-feast.adoc @@ -15,7 +15,7 @@ image:ROOT:feast_streaming.png[Feast streaming wokflow] You will need the following ready before starting the tutorial: -* Hazelcast CLC (see link:https://docs.hazelcast.com/clc/latest/install-clc[Install CLC]) +* Hazelcast CLC - link:https://docs.hazelcast.com/clc/latest/install-clc[Installation instructions] * A recent version of Docker and Docker Compose To set up your project, complete the following steps: @@ -349,7 +349,6 @@ Outputs something similar to: ] } ---- - == Summary In this tutorial, you learned how to set up a feature engineering project that uses Hazelcast as the online store. diff --git a/docs/modules/integrate/pages/vector-collection-connector.adoc b/docs/modules/integrate/pages/vector-collection-connector.adoc index 93251011a..9877c6fa2 100644 --- a/docs/modules/integrate/pages/vector-collection-connector.adoc +++ b/docs/modules/integrate/pages/vector-collection-connector.adoc @@ -12,11 +12,11 @@ For further information on vector collections, see xref:data-structures:vector-c This connector is included in the full and slim {enterprise-product-name} distributions of Hazelcast. == Permissions -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to these data structures. To search in vector collection, you must add the `create` and `read` permissions for those collections. If you use the vector collection sink to write to vector collections, you must add the `create` and `put` permissions for those collections. -For further information on adding these permissions, see xref:security:native-client-security.adoc[]. +For further information on adding these permissions, see xref:security:client-authorization.adoc[]. == Vector Collection as a Sink diff --git a/docs/modules/maintain-cluster/pages/enterprise-rest-api.adoc b/docs/modules/maintain-cluster/pages/enterprise-rest-api.adoc index 074f71841..d417ef4cc 100644 --- a/docs/modules/maintain-cluster/pages/enterprise-rest-api.adoc +++ b/docs/modules/maintain-cluster/pages/enterprise-rest-api.adoc @@ -215,10 +215,10 @@ You must obtain a token to authenticate against the REST server. For more info, == HTTPS Support -You can configure TLS/SSL in the REST API. We use Spring Boot underneath to enable Spring web services, and the TLS/SSL related options correspond to the underlying Spring Boot TLS/SSL support capabilities. +You can configure TLS in the REST API. We use Spring Boot underneath to enable Spring web services, and the TLS related options correspond to the underlying Spring Boot TLS support capabilities. -The TLS/SSL configuration for the REST server is configured differently to the normal Hazelcast TLS/SSL configuration. -For an example of the TLS/SSL configuration settings for REST, see the following code sample: +The TLS configuration for the REST server is configured differently to the normal Hazelcast TLS configuration. 
+For an example of the TLS configuration settings for REST, see the following code sample: [tabs] ==== diff --git a/docs/modules/maintain-cluster/pages/rest-api.adoc b/docs/modules/maintain-cluster/pages/rest-api.adoc index 693bc9826..8786aca00 100644 --- a/docs/modules/maintain-cluster/pages/rest-api.adoc +++ b/docs/modules/maintain-cluster/pages/rest-api.adoc @@ -430,7 +430,7 @@ NOTE: Some of the REST calls listed below need their REST endpoint groups to be See the <> on how to enable them. Also note that the value of `$\{PASSWORD}` in the following calls is checked only if -the security is xref:security:enabling-jaas.adoc[enabled] in Hazelcast, i.e., if you have Hazelcast {enterprise-product-name}. +the security is xref:security:enable-security.adoc[enabled] in Hazelcast, i.e., if you have Hazelcast {enterprise-product-name}. If the security is disabled, the `$\{PASSWORD}` can be left empty. [cols="5a"] @@ -802,7 +802,7 @@ belonging to the `HEALTH_CHECK` endpoint group. As mentioned previously in this section, REST API is disabled by default and this is for security reasons. Once it is enabled for a given endpoint group, some endpoints belonging to that group can be called by any application. -REST API does not check xref:security:native-client-security.adoc#permissions[permissions], that you may configure for the other clients. +REST API does not check xref:security:client-authorization.adoc#permissions[permissions], that you may configure for the other clients. If you set permissions for the REST API, keep in mind that they will not be enforced. On the other hand, you can request authentications for various REST endpoints. These are the following: diff --git a/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc b/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc index 4752a4731..29f7d15ff 100644 --- a/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc +++ b/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc @@ -1,37 +1,16 @@ -= Using the generic MapLoader += Using the Generic MapLoader :description: With the xref:working-with-external-data.adoc#options[generic MapLoader], you can configure a map to cache data from an external system. This topic includes an example of how to configure a map with a generic MapLoader that connects to a MySQL database. :page-beta: false {description} -NOTE: The objects created in the distributed map are stored as GenericRecord. You can use the `type-name` property to store the data in a POJO (Plain Old Java Object). - For a list of all supported external systems, including databases, see available xref:external-data-stores:external-data-stores.adoc#connectors[data connection types]. -== Before you begin +== Before you Begin You need a xref:external-data-stores:external-data-stores.adoc[data connection] that's configured on all cluster members. -== Add dependencies - -If you are using a Hazelcast JAR file, you need to ensure the following is added to your classpath: - -[source,xml] ----- - - com.hazelcast - hazelcast-sql - - - - com.hazelcast - hazelcast-mapstore - ----- - -NOTE: If you are using the slim distribution, you need to add `hazelcast-mapstore`. If you are using MongoDb, you also need to add `hazelcast-jet-mongodb`. - -== Quickstart configuration +== Quickstart Configuration This example shows a basic map configuration that uses a data connection called `my-mysql-database`. 
See xref:data-structures:map.adoc[] for the details of other properties that you can include in your map configuration. @@ -91,11 +70,11 @@ instance().getConfig().addMapConfig(mapConfig); <2> The name of your data connection. [[mapping]] -== SQL mapping for the generic MapLoader +== SQL Mapping for the Generic MapLoader -When you configure a map with the generic MapLoader, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from the external system, and is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. +When you configure a map with the generic MapLoader, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from the external system, and it is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. -== Configuration properties for the generic MapLoader +== Configuration Properties for the Generic MapLoader These configuration properties allow you to configure the generic MapLoader and its SQL mapping. @@ -394,82 +373,26 @@ mapConfig.setMapStoreConfig(mapStoreConfig); -- ==== -|[[columns]]`type-name` -|The type name of the compact GenericRecord. Use this property to map your record to an existing domain class. - -| -The name of the map. -| - -[tabs] -==== -XML:: -+ --- -[source,xml] ----- - - - - com.hazelcast.mapstore.GenericMapStore - - my-mysql-database - org.example.Person - - - ----- --- -YAML:: -+ --- -[source,yaml] ----- -hazelcast: - map: - mymapname: - map-store: - enabled: true - class-name: com.hazelcast.mapstore.GenericMapStore - properties: - data-connection-ref: my-mysql-database - type-name: org.example.Person ----- --- -Java:: -+ --- -[source,java] ----- -MapConfig mapConfig = new MapConfig("myMapName"); - -MapStoreConfig mapStoreConfig = new MapStoreConfig(); -mapStoreConfig.setClassName("com.hazelcast.mapstore.GenericMapStore"); -mapStoreConfig.setProperty("data-connection-ref", "my-mysql-database"); -mapStoreConfig.setProperty("type-name", "org.example.Person"); - -mapConfig.setMapStoreConfig(mapStoreConfig); ----- --- -==== - |=== == Supported backends -The generic MapStore needs a SQL Connector that supports `SELECT`, `UPDATE`, `SINK INTO` and `DELETE` statements. +GenericMapStore needs a SQL Connector that supports `SELECT`, `UPDATE`, `SINK INTO` and `DELETE` statements. Officially supported connectors: -- MySQL, PostgreSQL, Microsoft SQL Server, Oracle (it uses JDBC SQL Connector). -- MongoDB (make sure you have `hazelcast-jet-mongodb` artifact included on the classpath). +- JDBC Connector + * supports MySQL, PostgreSQL. + * requires JDBC driver on the classpath +- MongoDB Connector + * make sure you have `hazelcast-jet-mongodb` artifact included on the classpath. -== Related resources +== Related Resources - To monitor MapStores for each loaded entry, use the `EntryLoadedListener` interface. See the xref:events:object-events.adoc#listening-for-map-events[Listening for Map Events section] to learn how you can catch entry-based events. - xref:mapstore-triggers.adoc[]. -== Next steps +== Next Steps -See the xref:configuration-guide.adoc[MapStore configuration guide] for details about configuration options, including caching behaviors. 
+See the MapStore xref:configuration-guide.adoc[configuration guide] for details about configuration options, including caching behaviors. diff --git a/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc b/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc index 77f146d94..e36b33f78 100644 --- a/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc +++ b/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc @@ -1,37 +1,16 @@ -= Using the generic MapStore += Using the Generic MapStore :description: With the xref:working-with-external-data.adoc#options[generic MapStore], you can configure a map to cache data from and write data back to an external system. This topic includes an example of how to configure a map with a generic MapStore that connects to a MySQL database. :page-beta: false {description} -NOTE: The objects created in the distributed map are stored as GenericRecord. You can use the `type-name` property to store the data in a POJO (Plain Old Java Object). - For a list of all supported external systems, including databases, see available xref:external-data-stores:external-data-stores.adoc#connectors[data connection types]. -== Before you begin +== Before you Begin You need a xref:external-data-stores:external-data-stores.adoc[data connection] that's configured on all cluster members. -== Add dependencies - -If you are using a Hazelcast JAR file, you need to ensure the following is added to your classpath: - -[source,xml] ----- - - com.hazelcast - hazelcast-sql - - - - com.hazelcast - hazelcast-mapstore - ----- - -NOTE: If you are using the slim distribution, you need to add `hazelcast-mapstore`. If you are using MongoDb, you also need to add `hazelcast-jet-mongodb`. - -== Quickstart configuration +== Quickstart Configuration This example shows a basic map configuration that uses a data connection called `my-mysql-database`. See xref:data-structures:map.adoc[] for the details of other properties that you include in your map configuration. @@ -91,11 +70,11 @@ instance().getConfig().addMapConfig(mapConfig); <2> The name of your data connection. [[mapping]] -== SQL mapping for the generic MapStore +== SQL Mapping for the Generic MapStore -When you configure a map with the generic MapStore, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from or write data to the external system and is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. +When you configure a map with the generic MapStore, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from or write data to the external system and it is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. -== Configuration properties for the generic MapStore +== Configuration Properties for the Generic MapStore These configuration properties allow you to configure the generic MapStore and its SQL mapping. @@ -394,77 +373,18 @@ mapConfig.setMapStoreConfig(mapStoreConfig); -- ==== -|[[columns]]`type-name` -|The type name of the compact GenericRecord. Use this property to map your record to an existing domain class. - -| -The name of the map. 
-| - -[tabs] -==== -XML:: -+ --- -[source,xml] ----- - - - - com.hazelcast.mapstore.GenericMapStore - - my-mysql-database - org.example.Person - - - ----- --- -YAML:: -+ --- -[source,yaml] ----- -hazelcast: - map: - mymapname: - map-store: - enabled: true - class-name: com.hazelcast.mapstore.GenericMapStore - properties: - data-connection-ref: my-mysql-database - type-name: org.example.Person ----- --- -Java:: -+ --- -[source,java] ----- -MapConfig mapConfig = new MapConfig("myMapName"); - -MapStoreConfig mapStoreConfig = new MapStoreConfig(); -mapStoreConfig.setClassName("com.hazelcast.mapstore.GenericMapStore"); -mapStoreConfig.setProperty("data-connection-ref", "my-mysql-database"); -mapStoreConfig.setProperty("type-name", "org.example.Person"); - -mapConfig.setMapStoreConfig(mapStoreConfig); ----- --- -==== - |=== == Supported backends -The generic MapStore needs a SQL Connector that supports `SELECT`, `UPDATE`, `SINK INTO` and `DELETE` statements. +You can use any database as the MapStore backend as long as you have its Hazelcast SQL Connector on the classpath. -Officially supported connectors: +Officially supported backend databases: - MySQL, PostgreSQL, Microsoft SQL Server, Oracle (it uses JDBC SQL Connector). - MongoDB (make sure you have `hazelcast-jet-mongodb` artifact included on the classpath). -== Related resources +== Related Resources - To monitor MapStores for each loaded entry, use the `EntryLoadedListener` interface. See the xref:events:object-events.adoc#listening-for-map-events[Listening for Map Events section] to learn how you can catch entry-based events. @@ -472,4 +392,4 @@ Officially supported connectors: == Next Steps -See the xref:configuration-guide.adoc[MapStore configuration guide] for details about configuration options, including caching behaviors. +See the MapStore xref:configuration-guide.adoc[configuration guide] for details about configuration options, including caching behaviors. diff --git a/docs/modules/pipelines/pages/cdc-join.adoc b/docs/modules/pipelines/pages/cdc-join.adoc index d5fca7f64..c287bbb5b 100644 --- a/docs/modules/pipelines/pages/cdc-join.adoc +++ b/docs/modules/pipelines/pages/cdc-join.adoc @@ -622,6 +622,8 @@ You should see the following jars: . Enable user code deployment: + +include::clusters:partial$ucn-migrate-tip.adoc[] ++ Due to the type of sink we are using in our pipeline we need to make some extra changes in order for the Hazelcast cluster to be aware of the custom classes we have defined. + diff --git a/docs/modules/pipelines/pages/job-security.adoc b/docs/modules/pipelines/pages/job-security.adoc index dd5836a28..78d83d166 100644 --- a/docs/modules/pipelines/pages/job-security.adoc +++ b/docs/modules/pipelines/pages/job-security.adoc @@ -134,7 +134,7 @@ HazelcastInstance instance = Hazelcast.newHazelcastInstance(config); == Controlling Access to Jobs -In Hazelcast {enterprise-product-name}, you can restrict access to jobs, using the following xref:security:native-client-security.adoc[client permissions]: +In Hazelcast {enterprise-product-name}, you can restrict access to jobs, using the following xref:security:client-authorization.adoc[client permissions]: - Job permissions: Restrict what clients can do with jobs and SQL queries. - Connector permissions: Restrict read and write access for each connector. @@ -241,4 +241,4 @@ Traditionally, this is done by enabling authentication on the external system an == Related Resources -For information about client permissions, see xref:security:native-client-security.adoc[]. 
\ No newline at end of file +For information about client permissions, see xref:security:client-authorization.adoc[]. diff --git a/docs/modules/release-notes/pages/5-4-0.adoc b/docs/modules/release-notes/pages/5-4-0.adoc index 74e209e60..3588885f9 100644 --- a/docs/modules/release-notes/pages/5-4-0.adoc +++ b/docs/modules/release-notes/pages/5-4-0.adoc @@ -140,7 +140,7 @@ https://github.com/hazelcast/hazelcast/pull/26058[#26058] https://github.com/hazelcast/hazelcast/pull/25529[#25529] * Improved the permission checks in the file connectors by adding a method that returns the permissions required to resolve field names. https://github.com/hazelcast/hazelcast/pull/25348[#25348] -* Added support for permission subtraction (deny permissions) in client connections. See xref:security:native-client-security.adoc#deny-permissions[Deny Permissions]. +* Added support for permission subtraction (deny permissions) in client connections. See xref:security:client-authorization.adoc#deny-permissions[Deny Permissions]. https://github.com/hazelcast/hazelcast/pull/25154[#25154] * Added the boolean `forceCertValidation` property to the security configuration to initiate a remote certificate validity check. #6235 diff --git a/docs/modules/secure-cluster/pages/hardening-recommendations.adoc b/docs/modules/secure-cluster/pages/hardening-recommendations.adoc index a99932985..a42b37f29 100644 --- a/docs/modules/secure-cluster/pages/hardening-recommendations.adoc +++ b/docs/modules/secure-cluster/pages/hardening-recommendations.adoc @@ -47,7 +47,7 @@ See the xref:clusters:network-configuration.adoc#outbound-ports[Outbound Ports s * Hazelcast allows you to intercept every remote operation executed by the client. This lets you add a very flexible custom security logic. See the xref:security:security-interceptor.adoc[Security Interceptor section] for more information. * Hazelcast by default transmits data between clients and members, and members and members in plain text. -This configuration is not secure; you should enable TLS/SSL. See the xref:security:tls-ssl.adoc[TLS/SSL section]. +This configuration is not secure; you should enable TLS. See the xref:security:tls-ssl.adoc[TLS section]. * With TLS Security, the keystore is used. The keystore password is in the `hazelcast.xml/yaml` configuration file, and, if clients are used, also in the `hazelcast-client.xml/yaml`. Access to these files should be restricted. * You can use a custom trust store by setting the trust store path in the TLS/SSL configuration, which then avoids using the default trust store. @@ -60,4 +60,4 @@ packages which are allowed for deserialization. * Hazelcast uses Java reflection during SQL execution when the object format is set to `java`. We recommend using xref:sql:sql-reflection-configuration.adoc#configuring-reflection[Java reflection filter configuration] to whitelist the set of trusted classes or packages that are allowed to create through reflection. * You can disable script executions on the Hazelcast members. Scripts executed from Management center have access to system resources (files, etc.) with privileges of user running Hazelcast. -We recommend that scripting be xref:maintain-cluster:monitoring.adoc#toggle-scripting-support[disabled] on members. \ No newline at end of file +We recommend that scripting be xref:maintain-cluster:monitoring.adoc#toggle-scripting-support[disabled] on members. 
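As a concrete illustration of the TLS recommendation in the hardening list above, the following minimal sketch enables TLS on a member using the basic SSL context factory; the keystore and truststore locations, passwords, and protocol version are placeholder assumptions.

[source,java]
----
import java.util.Properties;

import com.hazelcast.config.Config;
import com.hazelcast.config.SSLConfig;
import com.hazelcast.core.Hazelcast;

public class TlsMemberExample {
    public static void main(String[] args) {
        Properties sslProps = new Properties();
        // Placeholder stores and passwords; restrict file permissions on these
        // files and on the configuration files that reference them.
        sslProps.setProperty("keyStore", "/opt/hazelcast/config/member.keystore");
        sslProps.setProperty("keyStorePassword", "changeit");
        sslProps.setProperty("trustStore", "/opt/hazelcast/config/member.truststore");
        sslProps.setProperty("trustStorePassword", "changeit");
        sslProps.setProperty("protocol", "TLSv1.2");

        SSLConfig sslConfig = new SSLConfig()
                .setEnabled(true)
                .setFactoryClassName("com.hazelcast.nio.ssl.BasicSSLContextFactory")
                .setProperties(sslProps);

        Config config = new Config();
        config.getNetworkConfig().setSSLConfig(sslConfig);

        Hazelcast.newHazelcastInstance(config);
    }
}
----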
diff --git a/docs/modules/secure-cluster/pages/security-defaults.adoc b/docs/modules/secure-cluster/pages/security-defaults.adoc index c0d05df54..191206344 100644 --- a/docs/modules/secure-cluster/pages/security-defaults.adoc +++ b/docs/modules/secure-cluster/pages/security-defaults.adoc @@ -29,3 +29,21 @@ If you are using Hazelcast on Docker and Kubernetes environments: * Since these environments don’t allow any access unless specified explicitly, all the features are enabled in the Hazelcast distributions on these cloud environments. +== Defaults by distribution type + +The table shows which security hardening features are used by default in the given distribution type. + +[options="header",cols="6,^1,^1,^1,^1"] +|===================================================================================================== +| Feature | ZIP/TAR Binaries | Homebrew/Debian/RPM | Maven/JAR | Docker +| Bind to localhost only | ✅ | ✅ | ❌ | ❌ +| Multicast discovery method disabled | ✅ | ✅ | ❌ | ❌ +| Advanced networking enabled | ❌ | ❌ | ❌ | ❌ +| Jet (and SQL) disabled | ❌ | ❌ | ✅ | ❌ +| Jet resource upload disabled | ❌ | ❌ | ✅ | ❌ +| User code deployment disabled | ✅ | ✅ | ✅ | ✅ +| REST health-check disabled | ❌ | ❌ | ✅ | ❌ +| Management Center scripting disallowed | ✅ | ✅ | ✅ | ✅ +| Management Center access to ConsoleApp disabled | ✅ | ✅ | ✅ | ✅ +| Management Center access from a specific IP only | ❌ | ❌ | ❌ | ❌ +|===================================================================================================== diff --git a/docs/modules/secure-cluster/partials/nav.adoc b/docs/modules/secure-cluster/partials/nav.adoc index 809739ba8..ce75fc858 100644 --- a/docs/modules/secure-cluster/partials/nav.adoc +++ b/docs/modules/secure-cluster/partials/nav.adoc @@ -1,24 +1,27 @@ * Securing a Cluster ** xref:security:overview.adoc[] -** xref:secure-cluster:security-defaults.adoc[] -** xref:secure-cluster:hardening-recommendations.adoc[] -** xref:security:enabling-jaas.adoc[] -** TLS/SSL +** TLS *** xref:security:tls-ssl.adoc[] *** xref:security:integrating-openssl.adoc[] *** xref:security:tls-configuration.adoc[] -** Authentication Types -*** xref:security:default-authentication.adoc[] +** xref:secure-cluster:security-defaults.adoc[] +** xref:security:enable-security.adoc[] +** Authentication +*** xref:security:authentication-overview.adoc[] *** xref:security:simple-authentication.adoc[] +*** xref:security:ldap-authentication.adoc[] +*** xref:security:kerberos-authentication.adoc[] +*** xref:security:tls-authentication.adoc[] +*** xref:security:identity-configuration.adoc[] *** xref:security:jaas-authentication.adoc[] -** xref:security:security-realms.adoc[] -** xref:security:cluster-member-security.adoc[] -** xref:security:native-client-security.adoc[] -** xref:security:socket-interceptor.adoc[] -** xref:security:security-interceptor.adoc[] +*** xref:security:default-authentication.adoc[] +** xref:security:client-authorization.adoc[] ** Advanced Security Features *** xref:security:logging-auditable-events.adoc[] *** xref:security:validating-secrets.adoc[] *** xref:security:fips-140-2.adoc[] *** xref:security:security-debugging.adoc[] *** xref:security:encryption.adoc[] +*** xref:security:socket-interceptor.adoc[] +*** xref:security:security-interceptor.adoc[] +** xref:secure-cluster:hardening-recommendations.adoc[] diff --git a/docs/modules/security/pages/authentication-overview.adoc b/docs/modules/security/pages/authentication-overview.adoc new file mode 100644 index 000000000..6cf71bd66 --- /dev/null +++ 
b/docs/modules/security/pages/authentication-overview.adoc @@ -0,0 +1,255 @@ += Authentication overview +:page-enterprise: true +:page-aliases: security-realms.adoc + +NOTE: Authentication is the process of verifying the identity of a user, system, or entity before granting access to resources or services. It ensures that the person or system requesting access is who they claim to be, typically through credentials like passwords, biometrics, tokens, or multi-factor methods. Authentication is a critical security step in protecting data and systems from unauthorized access. + +In Hazelcast, Authentication is used to verify the incoming connection has valid credentials configured. +Hazelcast supports several authentication types that can be configured for member-to-member, and client-to-member communication: + +* xref:simple-authentication.adoc[Simple] - users and roles are configured directly within the member configuration +* xref:ldap-authentication.adoc[LDAP] - LDAP server is used to verify credentials and load roles +* xref:kerberos-authentication.adoc[Kerberos] - service tickets are used for authentication +* xref:tls-authentication.adoc[TLS] - information from client-side TLS certificates (when TLS mutual authentication is enabled) are used for role assignment +* xref:jaas-authentication.adoc[Custom JAAS login modules] - if other Hazelcast provided authentication mechanisms don't fully cover user needs + +During the authentication roles can be also assigned to the connecting clients, which are later used for the <>. + +== Security realms + +Named security configurations called security realms are used to map an authentication mechanism to a Hazelcast protocol (client or member). +Security realms enable you to define security configurations on the module which consumes it. + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + + + + + + + monitor + hazelcast + + + root + + + + + + + + +---- +-- + +YAML:: ++ +-- +[source,yaml] +---- +hazelcast: + security: + enabled: true + realms: + - name: simpleRealm + authentication: + simple: + users: + - username: test + password: 'V3ryS3cr3tString' + roles: + - monitor + - hazelcast + - username: man-center + password: 'HardToGuess' + roles: + - root +---- +-- + +Java:: ++ +[source,java] +---- +include::ROOT:example$/security/EnablingSecurity.java[tag=authn] +---- +==== + +Besides authentication, security realms can also contain xref:identity-configuration.adoc[Identity] and access-control-service configurations. + + +== Common authentication options + +All Hazelcast provided authentication types support some common configuration parameters. + +[cols="1,1,3"] +.Common Configuration Options +|=== +| Option Name +| Default Value +| Description + +| `skip-role` +| `false` +| When set to `true`, the authentication mechanism won't assign roles during authentication but will only verify the credentials. + +| `skip-identity` +| `false` +| When set to `true`, the authentication mechanism won't use the remote party name after the authentication. + +| `skip-endpoint` +| `false` +| When set to `true`, the authentication mechanism won't use the remote party IP address name after authentication. + +|=== + +For more advanced configuration options, see following sections. + +== Identity + +A security configuration element where members and clients have their own credentials configured is called an identity. +This identity can be a username-password pair, a token, or a Kerberos ticket. 
For more information, see xref:identity-configuration.adoc[Identity configuration]. + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + + + + + + + + + + + + + + + + + +---- +-- + +YAML:: ++ +-- +[source,yaml] +---- +hazelcast: + security: + enabled: true + realms: + - name: aRealm + authentication: + ldap: +# ... + identity: + username-password: + username: uid=hazelcast,ou=Services,dc=hazelcast,dc=com + password: theSecret + member-authentication: + realm: aRealm + client-authentication: + realm: aRealm + +---- +-- + +Java:: ++ +[source,java] +---- +include::ROOT:example$/security/EnablingSecurity.java[tag=identity] +---- +==== + + +== Authorization + +Authorization is supported by the Client protocol. Clients are assigned roles during authentication. Access is then controlled by +permissions assigned to the roles. + +Authorization isn't supported in member-to-member communications. All members +have unlimited access to the cluster data once they are authenticated. + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + + + + + + + + + + + + all + + + + + +---- +-- + +YAML:: ++ +-- +[source,yaml] +---- +hazelcast: + security: + enabled: true + realms: + - name: aRealm +# ... + client-authentication: + realm: aRealm + client-permissions: + all: + principal: man-center + map: + - name: playground + principal: * + actions: + - all + +---- +-- + +Java:: ++ +[source,java] +---- +include::ROOT:example$/security/EnablingSecurity.java[tag=authz] +---- +==== + +For more information, see xref:client-authorization.adoc[]. diff --git a/docs/modules/security/pages/native-client-security.adoc b/docs/modules/security/pages/client-authorization.adoc similarity index 77% rename from docs/modules/security/pages/native-client-security.adoc rename to docs/modules/security/pages/client-authorization.adoc index d851e1562..5e6c4eaf7 100644 --- a/docs/modules/security/pages/native-client-security.adoc +++ b/docs/modules/security/pages/client-authorization.adoc @@ -1,132 +1,31 @@ -= Client Security -:description: To protect your members from a malicious client, you can allow them to identify clients and restrict their permissions to access either data in data structures or features such as user code deployment. += Client Authorization :page-enterprise: true -{description} +To protect your members from a malicious client, you can allow them to identify clients and restrict their permissions to access data in data structures or use features such as user code deployment. -To allow members to identify clients, set up <>. - -To allow members to restrict client permissions, set up <>. - -== Authenticating Clients - -To implement the client authentication, reference a xref:security-realms.adoc[Security Realm] -with the `authentication` section defined in the `client-authentication` setting -of a cluster member's configuration. - -The `authentication` configuration defines a method used to verify the client's identity -and assign its roles. - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- - - ... - - - - - - ldap://corp-ldap.example.com/ - cn - - - - - - - ... - ----- --- - -YAML:: -+ -[source,yaml] ----- -hazelcast: - security: - enabled: true - realms: - name: clientRealm - authentication: - ldap: - url: ldap://corp-ldap.example.com/ - role-mapping-attribute: cn - client-authentication: - realm: clientRealm ----- -==== - -The identity of the connecting client is defined on the client side. -Usually, there are no security realms on the clients, but just identity -defined directly in the security configuration. 
- -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- - - ... - - - - ... - ----- --- - -YAML:: -+ -[source,yaml] ----- -hazelcast-client: - security: - username-password: - username: uid=member1,dc=example,dc=com - password: s3crEt ----- -==== - -On the clients, you can use the same identity types as in security realms: - -* `username-password` -* `token` -* `kerberos` (may require an additional security realm definition) -* `credentials-factory` - -== Authorizing Clients +The xref:security:authentication-overview.adoc[] describes how authentication is used for verifying credentials, and roles mapping. This section describes how the assigned role names are used to map permissions to clients. Hazelcast client authorization is configured by a client permission policy. Hazelcast has a default permission policy implementation that uses permission configurations defined in the Hazelcast security configuration. -Default policy permission checks are done against instance types (map, queue, etc.), +Default policy permission checks are made against instance types (map, queue, etc.), instance names, instance actions (put, read, remove, add, etc.), the client endpoint address (`ClusterEndpointPrincipal`), and client roles (`ClusterRolePrincipal`). -The default permission policy allows to use comma separated names in the `principal` +The default permission policy allows you to use comma separated names in the `principal` attribute configuration. [NOTE] ==== -Unless part of the role name, do not include spaces when adding names to the `principal` attribute. +Unless part of the role name, don't include spaces when adding names to the `principal` attribute. -Hazelcast does not automatically remove spaces in role names. If you include spaces that are not part of the name, permission is not granted to the intended role. +Hazelcast doesn't automatically remove spaces in role names. If you include spaces that aren't part of the name, permission isn't granted to the intended role. -For example, if you configure permissions for the *admin* and *devel* roles using ``principal=" admin ,devel"``, the *admin* role is not granted the permission. +For example, if you configure permissions for the *admin* and *devel* roles using ``principal=" admin ,devel"``, the *admin* role isn't granted the permission. ==== You can define the instance and principal names as wildcards using the `"*"` character. -See the xref:configuration:using-wildcards.adoc[Using Wildcards] section for details. +For more information, see xref:configuration:using-wildcards.adoc[Using Wildcards]. The endpoint names can use range characters `"-"` and `"*"` as described in the xref:clusters:network-configuration.adoc#interfaces[Interfaces] section. @@ -275,18 +174,18 @@ The `IPermissionPolicy.getPermissions(Subject subject, Class`` permission grants clients access to all data and features. [tabs] ==== @@ -317,10 +216,10 @@ all: === Management Permission -This permission defines which +The ``` permission defines which client principals/endpoints are allowed to perform management tasks. -Here, the client we mention is the one that is used by Hazelcast Management Center -when it connects to the clusters. To learn more about this client, see xref:{page-latest-supported-mc}@management-center:ROOT:connecting-members.adoc[]. +The client specified in the following code example is used by Hazelcast Management Center +when it connects to clusters. 
To learn more about this client, see xref:{page-latest-supported-mc}@management-center:ROOT:connecting-members.adoc[]. [tabs] ==== @@ -349,9 +248,9 @@ management: ---- ==== -=== Map Permission +=== Map permission -Actions: all, create, destroy, index, intercept, listen, lock, put, read, remove +Actions: all, create, destroy, index, intercept, listen, lock, put, read, remove . [tabs] ==== @@ -386,9 +285,9 @@ map: ---- ==== -=== Queue Permission +=== Queue permission -Actions: add, all, create, destroy, listen, read, remove +Actions: add, all, create, destroy, listen, read, remove. [tabs] ==== @@ -423,9 +322,9 @@ queue: ---- ==== -=== MultiMap Permission +=== MultiMap permission -Actions: all, create, destroy, listen, lock, put, read, remove +Actions: all, create, destroy, listen, lock, put, read, remove. [tabs] ==== @@ -460,9 +359,9 @@ multimap: ---- ==== -=== Replicated Map Permission +=== Replicated map permission -Actions: all, create, destroy, index, intercept, listen, lock, put, read, remove +Actions: all, create, destroy, index, intercept, listen, lock, put, read, remove. [tabs] ==== @@ -497,9 +396,9 @@ replicatedmap: ---- ==== -=== Topic Permission +=== Topic permission -Actions: create, destroy, listen, publish +Actions: create, destroy, listen, publish. [tabs] ==== @@ -534,9 +433,9 @@ topic: ---- ==== -=== Reliable Topic Permission +=== Reliable topic permission -Actions: create, destroy, listen, publish +Actions: create, destroy, listen, publish. [tabs] ==== @@ -571,9 +470,9 @@ reliable-topic: ---- ==== -=== List Permission +=== List permission -Actions: add, all, create, destroy, listen, read, remove +Actions: add, all, create, destroy, listen, read, remove. [tabs] ==== @@ -608,9 +507,9 @@ list: ---- ==== -=== Set Permission +=== Set permission -Actions: add, all, create, destroy, listen, read, remove +Actions: add, all, create, destroy, listen, read, remove. [tabs] ==== @@ -645,9 +544,9 @@ set: ---- ==== -=== Ringbuffer Permission +=== Ringbuffer permission -Actions: add, put, read, create, destroy +Actions: add, put, read, create, destroy. [tabs] ==== @@ -682,9 +581,9 @@ ringbuffer: ---- ==== -=== Lock Permission +=== Lock permission -Actions: all, create, destroy, lock, read +Actions: all, create, destroy, lock, read. [tabs] ==== @@ -719,9 +618,9 @@ lock: ---- ==== -=== AtomicLong Permission +=== AtomicLong permission -Actions: all, create, destroy, modify, read +Actions: all, create, destroy, modify, read. [tabs] ==== @@ -756,9 +655,9 @@ atomic-long: ---- ==== -=== AtomicReference Permission +=== AtomicReference permission -Actions: all, create, destroy, modify, read +Actions: all, create, destroy, modify, read. [tabs] ==== @@ -794,9 +693,9 @@ atomic-reference: ==== -=== CountDownLatch Permission +=== CountDownLatch permission -Actions: all, create, destroy, modify, read +Actions: all, create, destroy, modify, read. [tabs] ==== @@ -831,9 +730,9 @@ countdown-latch: ---- ==== -=== FlakeIdGenerator Permission +=== FlakeIdGenerator permission -Actions: all, create, destroy, modify, read +Actions: all, create, destroy, modify, read. [tabs] ==== @@ -868,9 +767,9 @@ flake-id-generator: ---- ==== -=== Semaphore Permission +=== Semaphore permission -Actions: all, acquire, create, destroy, read, release +Actions: all, acquire, create, destroy, read, release. [tabs] ==== @@ -942,9 +841,9 @@ executor-service: ---- ==== -=== Durable Executor Service Permission +=== Durable executor service permission -Actions: all, create, destroy +Actions: all, create, destroy. 
[tabs] ==== @@ -979,9 +878,9 @@ durable-executor-service: ---- ==== -=== Scheduled Executor Service Permission +=== Scheduled executor service permission -Actions: all, create, destroy, read, modify +Actions: all, create, destroy, read, modify. [tabs] ==== @@ -1016,9 +915,9 @@ scheduled-executor-service: ---- ==== -=== Cardinality Estimator Permission +=== Cardinality estimator permission -Actions: all, create, destroy, read, modify +Actions: all, create, destroy, read, modify. [tabs] ==== @@ -1053,9 +952,9 @@ cardinality-estimator: ---- ==== -=== PN Counter Permission +=== PN counter permission -Actions: all, create, destroy, read, modify +Actions: all, create, destroy, read, modify. [tabs] ==== @@ -1090,7 +989,7 @@ pn-counter: ---- ==== -=== Transaction Permission +=== Transaction permission [tabs] ==== @@ -1119,9 +1018,9 @@ transaction: ---- ==== -=== Cache Permission +=== Cache permission -Actions: all, create, destroy, listen, put, read, remove +Actions: all, create, destroy, listen, put, read, remove. [tabs] ==== @@ -1157,12 +1056,12 @@ cache: ---- ==== NOTE: The name provided in `cache-permission` must be the Hazelcast distributed -object name corresponding to the `Cache` as described in +object name that corresponds to the `Cache` as described in the xref:jcache:hazelcast-integration.adoc[JCache - Hazelcast Instance Integration section]. -=== Vector Collection Permission (Beta) +=== Vector collection permission (Beta) -Actions: all, create, destroy, put, read, remove, optimize +Actions: all, create, destroy, put, read, remove, optimize. [tabs] ==== @@ -1197,9 +1096,9 @@ vector-collection: ---- ==== -=== User Code Deployment Permission +=== User Code Deployment permission -Actions: all, deploy +Actions: all, deploy. include::clusters:partial$ucn-migrate-tip.adoc[] @@ -1235,7 +1134,7 @@ user-code-deployment: ---- ==== -If you have migrated to {ucn} use the following permissions: +If you have migrated to {ucn}, use the following permissions: [tabs] ==== @@ -1269,7 +1168,7 @@ user-code-namespace: ---- ==== -=== Configuration Permission +=== Configuration permission This permission defines which client principals/endpoints are allowed to @@ -1302,7 +1201,7 @@ config: ---- ==== -=== Job Permission +=== Job permission Actions: @@ -1316,10 +1215,10 @@ WARNING: When you query a streaming source with SQL, Hazelcast runs that query a - `export-snapshot`: Export or read snapshots. - `add-resources`: Upload resources and classes as well as jobs to members. + -WARNING: Hazelcast cannot check permissions in code that's uploaded with a job, If you enable this permission, clients can xref:pipelines:job-security.adoc[upload custom code] that ignores any configured permissions. +WARNING: Hazelcast can't check permissions in code that's uploaded with a job. If you enable this permission, clients can xref:pipelines:job-security.adoc[upload custom code] that ignores any configured permissions. - `all`: Enable all actions. -All actions for job permissions also enable the `read` action. For example if you enable the `create` action, the `read` action is automatically enabled as well. +All actions for job permissions also enable the `read` action. For example, if you enable the `create` action, the `read` action is automatically enabled as well.
[tabs] ==== @@ -1349,7 +1248,7 @@ job: ==== -=== Connector Permission +=== Connector permission You can give permissions to the following xref:integrate:connectors.adoc[connectors]: @@ -1419,7 +1318,7 @@ connector: WARNING: To protect external systems from being reached by external connectors (JDBC, Mongo, S3, ...), use other means than Hazelcast client permissions. Traditionally, this is done by enabling authentication on the external system and/or setting up firewall rules. -=== SQL Permission +=== SQL permission You can give clients permission to use the following xref:sql:sql-statements.adoc[SQL statements]: @@ -1524,26 +1423,25 @@ sql: ==== [[handling-permissions-when-a-new-member-joins]] -=== Handling Permissions When a New Member Joins +=== Handling permissions when a new member joins By default, the set of permissions defined in the leader member of a cluster is -distributed to the newly joining members, overriding their own permission -configurations, if any. However, you can configure a new member to be joined, so that -it keeps its own set of permissions and even send these to the existing members in +distributed to new members that join, overriding their own permission +configurations, if any. However, you can configure a new member to join but keep its own set of permissions and even send these to the existing members in the cluster. This can be done dynamically, without needing to restart the cluster, using either one of the following configuration options: * the `on-join-operation` configuration attribute * the `setOnJoinPermissionOperation()` method -Using the above, you can choose whether a new member joining to a cluster will +You can use these options to choose whether a new member joining to a cluster will apply the client permissions stored in its own configuration, or use the ones -defined in the cluster. The behaviors that you can specify with the configuration +defined in the cluster. The behaviors that you specify with the configuration are `RECEIVE`, `SEND` and `NONE`, which are described after the examples below. -The following are the examples for both approaches on how to use them: +The following examples show how to use both approaches: -**Declarative Configuration:** +**Declarative configuration:** [tabs] ==== @@ -1577,7 +1475,7 @@ hazelcast: ---- ==== -**Programmatic Configuration:** +**Programmatic configuration:** [source,java] ---- @@ -1590,24 +1488,24 @@ config.getSecurityConfig() The behaviors are explained below: * `RECEIVE`: Applies the permissions from the leader member in the -cluster before join. This is the default value. -* `SEND`: Doesn't apply the permissions from the leader member before join. -If the security is enabled, then it refreshes or replaces the cluster wide -permissions with the ones in the new member after the join is complete. -This option is suitable for the scenarios where you need to replace the -cluster wide permissions without restarting the cluster. -* `NONE`: Neither applies pre-join permissions, nor sends the local permissions -to the other members. It means that the new member does not send its own +cluster before joining. This is the default value. +* `SEND`: Doesn't apply the permissions from the leader member before joining. +If security is enabled, then it refreshes or replaces the cluster wide +permissions with the ones in the new member after it joins. +This option is suitable for scenarios where you need to replace the +cluster-wide permissions without restarting the cluster. 
+* `NONE`: Neither applies pre-join permissions nor sends the local permissions +to the other members. It means that the new member doesn't send its own permission definitions to the cluster, but keeps them when it joins. However, after the join, when you update the permissions in the other cluster members, -those updates are also sent to the newly joining member. Therefore, this option -is suitable for the scenarios where you need to elevate privileges temporarily +those updates are also sent to the new member. Therefore, this option +is suitable for scenarios where you need to elevate privileges temporarily on a single member (preferably a xref:management:cluster-utilities.adoc#enabling-lite-members[lite member]) for a -limited time period. The clients which want to use these temporary permissions +limited time period. The clients which need to use these temporary permissions have to access the cluster through this single new member, meaning that you need to configure the xref:clients:java.adoc#configure-cluster-routing-mode[SINGLE_MEMBER] cluster routing mode for such clients. + -Note that, the `create` and `destroy` permissions will not work when using +Note that the `create` and `destroy` permissions won't work when using the `NONE` option, since the distributed objects need to be created/destroyed on all the members. + The following is an example for a scenario where `NONE` is used: @@ -1639,20 +1537,20 @@ hzLite.shutdown(); ---- [[deny-permissions]] -=== Deny Permissions +=== Deny permissions -Hazelcast employs Additive Access Control as its default security mechanism. +Hazelcast employs additive access control as its default security mechanism. When a client connects to a security-enabled cluster, it is initially granted no permissions. As a result, access to protected resources is inherently denied unless explicit permissions are configured and granted to specific roles. -The Additive Access Control approach has limited expression capabilities and +The additive access control approach has limited expression capabilities and is not well-suited for configurations involving simple exclusions. For example, it's challenging to allow access to all maps except the one named `"private"`. -To address this limitation, Hazelcast introduces the concept of Deny Permissions -(or Deny Rules). +To address this limitation, Hazelcast introduces the concept of deny permissions +(or deny rules). Within the permission configuration, there is a `boolean` flag called `deny` that enables permission subtraction. @@ -1708,7 +1606,7 @@ hazelcast: ==== [[priority-of-grant-and-deny-permissions]] -=== Priority of Grant and Deny Permissions +=== Priority of grant and deny permissions By default, when a permission is both granted and denied, the denial takes precedence. In other words, if conflicting permissions exist, denial prevails. @@ -1751,15 +1649,15 @@ hazelcast: ==== [[permission-evaluation-table]] -==== Permission Evaluation Table +==== Permission evaluation table -The table below illustrates how permission evaluation changes when `priority-grant` is configured. 
+The table below illustrates how permission evaluation changes when `priority-grant` is configured: [options="header"] |============================================================================ | Permission Implication | `priority-grant=false` (default) | `priority-grant=true` -| No Grant or Deny Implication | Denied | Granted -| Implication from Grant only | Granted | Granted -| Implication from Deny only | Denied | Denied -| Both Grant and Deny Imply | Denied | Granted +| No Grant or Deny Implication | ❌ | ✅ +| Implication from Grant only | ✅ | ✅ +| Implication from Deny only | ❌ | ❌ +| Both Grant and Deny Imply | ❌ | ✅ |============================================================================ diff --git a/docs/modules/security/pages/cluster-member-security.adoc b/docs/modules/security/pages/cluster-member-security.adoc deleted file mode 100644 index 30a895a1e..000000000 --- a/docs/modules/security/pages/cluster-member-security.adoc +++ /dev/null @@ -1,58 +0,0 @@ -= Cluster Member Security -:page-enterprise: true - -Hazelcast supports the standard Java Security (JAAS) based authentication -between the cluster members. A xref:security:security-realms.adoc[Security Realm] can -be referenced by `` element to define authentication -between the member and identity of the current member. - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- - - ... - - - - - - ldap://corp-ldap.example.com/ - - - - - - - - - - ... - ----- --- - -YAML:: -+ -[source,yaml] ----- -hazelcast: - security: - enabled: true - realms: - name: memberRealm - authentication: - ldap: - url: ldap://corp-ldap.example.com - identity: - username-password: - username: uid=member1,dc=example,dc=com - password: s3crEt - member-authentication: - realm: memberRealm ----- -==== \ No newline at end of file diff --git a/docs/modules/security/pages/default-authentication.adoc b/docs/modules/security/pages/default-authentication.adoc index aa7a3215b..296174e4a 100644 --- a/docs/modules/security/pages/default-authentication.adoc +++ b/docs/modules/security/pages/default-authentication.adoc @@ -2,16 +2,21 @@ [[default-authentication]] :page-enterprise: true -The Default Authentication is used when security is enabled and no explicit -authentication configuration is provided. It can happen when: +NOTE: When Hazelcast security is enabled, we recommended you explicitly specify +xref:security:authentication-overview.adoc[authentication types] for +client-to-member and member-to-member authentication. The default authentication method +described in this section is kept in Hazelcast for backward compatibility reasons. + +Default authentication is used when security is enabled and no explicit +authentication configuration is provided. This can happen when: * `member-authentication` is not configured * the security realm referenced by `member-authentication` doesn't contain the `authentication` configuration * `client-authentication` is not configured -* the security realm referenced by `client-authentication` doesn't contain the `authentication` configuration. +* the security realm referenced by `client-authentication` doesn't contain the `authentication` configuration -The behavior of the default authentication mechanism depends on **member's identity configuration** -(i.e., `identity` configuration in the security realm referenced from `member-authentication`). +The behavior of the default authentication mechanism depends on the **member's identity configuration** +(i.e. 
`identity` configuration in the security realm referenced from `member-authentication`). If the `identity` is configured as a `username-password`, then the authenticated username and password credentials are checked for equality with these configured ones. In all other cases, only the incoming -cluster name is checked for equality with the one configured on the authenticating member. \ No newline at end of file +cluster name is checked for equality with the one configured on the authenticating member. diff --git a/docs/modules/security/pages/enable-security.adoc b/docs/modules/security/pages/enable-security.adoc new file mode 100644 index 000000000..3fc469eeb --- /dev/null +++ b/docs/modules/security/pages/enable-security.adoc @@ -0,0 +1,54 @@ += Enabling Security +:page-enterprise: true +:page-aliases: enabling-jaas.adoc + +With Hazelcast's extensible security you can: + +* authenticate both cluster members and clients, and +* perform access control checks on client operations. + +You can control access based on roles assigned to clients +and client endpoint address. + +You can enable security declaratively or programmatically, +as shown below. + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + ... + + ... + + ... + +---- +-- + +YAML:: ++ +-- +[source,yaml] +---- +hazelcast: + security: + enabled: true +---- +-- + +Java:: ++ +[source,java] +---- +include::ROOT:example$/security/EnablingSecurity.java[tag=es] +---- +==== + +For information on how to set your Hazelcast {enterprise-product-name} +license, see xref:deploy:using-enterprise-edition.adoc#setting-the-license-key[]. diff --git a/docs/modules/security/pages/enabling-jaas.adoc b/docs/modules/security/pages/enabling-jaas.adoc deleted file mode 100644 index f6adf2362..000000000 --- a/docs/modules/security/pages/enabling-jaas.adoc +++ /dev/null @@ -1,53 +0,0 @@ -= Enabling JAAS Security -:page-enterprise: true - -With Hazelcast's extensible, JAAS based security feature, you can: - -* authenticate both cluster members and clients -* and perform access control checks on client operations. -Access control can be done according to endpoint principal -and/or endpoint address. - -You can enable security declaratively or programmatically, -as shown below. - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- - - ... - - ... - - ... - ----- --- - -YAML:: -+ --- -[source,yaml] ----- -hazelcast: - security: - enabled: true ----- --- - -Java:: -+ -[source,java] ----- -include::ROOT:example$/security/EnablingSecurity.java[tag=es] ----- -==== - -Also, see the xref:deploy:using-enterprise-edition.adoc#setting-the-license-key[Setting License Key section] -for information about how to set your [blue]#Hazelcast {enterprise-product-name}# -license. \ No newline at end of file diff --git a/docs/modules/security/pages/encryption.adoc b/docs/modules/security/pages/encryption.adoc index 70f26ac1e..3dbd4f185 100644 --- a/docs/modules/security/pages/encryption.adoc +++ b/docs/modules/security/pages/encryption.adoc @@ -2,7 +2,7 @@ :page-enterprise: true WARNING: The symmetric encryption feature has been deprecated. You can use the -TLS/SSL protocol to establish an encrypted communication +TLS protocol to establish an encrypted communication across your Hazelcast cluster. 
Hazelcast offers features which allow to reach a required privacy on @@ -16,7 +16,7 @@ There are two different encryption features: ** supported by members and clients ** TCP-only, i.e., multicast join messages are not encrypted + -More details in the xref:security:tls-ssl.adoc[TLS/SSL section] +More details in the xref:security:tls-ssl.adoc[TLS section] + . Symmetric encryption for Hazelcast member protocol ** only supported by the members; communication with clients is not encrypted diff --git a/docs/modules/security/pages/identity-configuration.adoc b/docs/modules/security/pages/identity-configuration.adoc new file mode 100644 index 000000000..646ba4e9c --- /dev/null +++ b/docs/modules/security/pages/identity-configuration.adoc @@ -0,0 +1,201 @@ += Identity configuration +:page-enterprise: true + +Identity configuration enables you to define your own <>, which are are used to authenticate to other systems. + +The following identity configuration types are available: + +* `username-password`: defines a new `PasswordCredentials` object +* `token`: defines a new `TokenCredentials` object +* `kerberos`: defines the Kerberos identity that uses the +service tickets stored in the `TokenCredentials` object +* `credentials-factory`: configures the factory class which creates the `Credentials` objects + +== Username-Password identity + +The username with password is the most typical type of credentials. +This is configured by the `` +XML configuration element as shown below: + +[tabs] +==== +XML:: ++ +-- +[source,xml] +---- +include::ROOT:example$/hazelcast-password-realm.xml[tag=password] +---- +-- + +YAML:: ++ +-- +[source,yaml] +---- +realms: + name: passwordRealm + identity: + username-password: + username: member1 + password: s3crEt +member-authentication: + realm: passwordRealm +---- +-- + +Java:: ++ +-- +[source,java] +---- +include::ROOT:example$/SecurityXmlTest.java[tag=password-realm] +---- +-- +==== + +== Token identity + +Tokens are also configurable for +identity representation. The `` XML configuration element +supports either plain ASCII tokens or Base64 encoded values. +The optional `encoding` argument can have either `base64` or `none` (default) +as its value. + +The following two realms define the same token value - bytes of the "Hazelcast" string: + +[tabs] +==== +XML:: ++ +-- +[source,xml] +---- +include::ROOT:example$/hazelcast-authentication-types.xml[tag=token] +---- +-- + +YAML:: ++ +-- +[source,yaml] +---- +realms: + - name: tokenRealm1 + identity: + token: + value: Hazelcast + - name: tokenRealm2 + identity: + token: + encoding: base64 + value: SGF6ZWxjYXN0 +---- +-- + +Java:: ++ +-- +[source,java] +---- +include::ROOT:example$/SecurityXmlTest.java[tag=token-realm] +---- +-- +==== + +Hazelcast doesn't provide an xref:authentication-overview.adoc[authentication type] with direct `token` identity support. Tokens are usually used together with custom JAAS login modules. + +== Kerberos identity + +The `kerberos` identity type is used to retrieve Kerberos service tickets to access +a member using the `kerberos` authentication type. +For more information on the `kerberos` identity, see xref:kerberos-authentication.adoc#identity-configuration[Kerberos authentication]. + +== Credentials factory + +The most flexible way to define an identity is via <> objects +created by a custom credential factory. This is an implementation +of the `com.hazelcast.security.ICredentialsFactory` +interface. The `newCredentials()` method provides the credentials. 
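The sketch below is a rough programmatic counterpart to the declarative sample later in this section. It assumes the `com.examples.TOTPCredentialsFactory` class from that sample, and the `RealmConfig` and `CredentialsFactoryConfig` method names reflect our reading of the Java security configuration API, so treat them as assumptions.

[source,java]
----
import java.util.Properties;

import com.hazelcast.config.Config;
import com.hazelcast.config.CredentialsFactoryConfig;
import com.hazelcast.config.SecurityConfig;
import com.hazelcast.config.security.RealmConfig;

public class CredentialsFactoryRealmExample {
    public static void main(String[] args) {
        // Factory properties are handed to the factory's init(Properties) method.
        Properties factoryProps = new Properties();
        factoryProps.setProperty("seed", "3132333435363738393031323334353637383930");

        RealmConfig realmConfig = new RealmConfig()
                .setCredentialsFactoryConfig(
                        new CredentialsFactoryConfig("com.examples.TOTPCredentialsFactory")
                                .setProperties(factoryProps));

        Config config = new Config();
        SecurityConfig securityConfig = config.getSecurityConfig().setEnabled(true);
        securityConfig.addRealmConfig("credentialsFactoryRealm", realmConfig);
        securityConfig.setMemberRealm("credentialsFactoryRealm");
    }
}
----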
+ +The XML configuration uses the `` element to define the factory class. + +The behavior of credential factories can be controlled by specifying factory properties. +The properties are provided in the `init(Properties)` method. + +See the following sample configuration: + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- +include::ROOT:example$/hazelcast-authentication-types.xml[tag=credentialsFactoryRealm] +---- +-- + +YAML:: ++ +[source,yaml] +---- +realms: + name: credentialsFactoryRealm + identity: + credentials-factory: + class-name: com.examples.TOTPCredentialsFactory + properties: + seed: 3132333435363738393031323334353637383930 +---- +==== + +[[credentials]] +=== Credentials + +One of the key elements in Hazelcast security is the `Credentials` object, which +represents evidence of the identity (member or client). +The content of `Credentials` object is verified during the authentication. +Credentials is an interface which extends `Serializable`. + +[source,java] +---- +public interface Credentials extends Serializable { + String getName(); +} +---- + +There are two subtype interfaces which simplify `Credentials` usage. +The subtypes reflect data provided in the client authentication messages: + +* Name and password (`com.hazelcast.security.PasswordCredentials`) +* Byte array token (`com.hazelcast.security.TokenCredentials`) + +The interfaces have the following forms: + +[source,java] +---- +public interface PasswordCredentials extends Credentials { + String getPassword(); +} +---- + +[source,java] +---- +public interface TokenCredentials extends Credentials { + byte[] getToken(); + + default Data asData() { + return new HeapData(getToken()); + } +} +---- + +The `Credentials` instance can be retrieved in the login modules +by handling a `CredentialsCallback`, as shown below: + +[source,java] +---- +include::ROOT:example$/security/CustomLoginModuleTest.java[tag=credentials-callback] +---- diff --git a/docs/modules/security/pages/integrating-openssl.adoc b/docs/modules/security/pages/integrating-openssl.adoc index fff42b2ca..9883b5e67 100644 --- a/docs/modules/security/pages/integrating-openssl.adoc +++ b/docs/modules/security/pages/integrating-openssl.adoc @@ -1,14 +1,10 @@ = Integrating OpenSSL / BoringSSL :page-enterprise: true -NOTE: You cannot integrate OpenSSL into Hazelcast when xref:encryption.adoc[Symmetric Encryption] is enabled. -Also note that the symmetric encryption feature has been deprecated. - -TLS/SSL in Java is normally provided by the JRE. However, the performance overhead can be -significant; even with AES intrinsics -enabled. If you are using an x86_64 system (Linux, Mac, Windows), Hazelcast supports native -integration for TLS/SSL which can provide significant performance -improvements. There are two supported native TLS/SSL libraries available through +TLS in Java is normally provided by the Java runtime (JRE). However, the performance overhead can be +significant; even with AES intrinsics enabled. If you are using an x86_64 system (Linux, Mac, Windows), Hazelcast supports native +integration for TLS which can provide significant performance +improvements. There are two supported native TLS libraries available through https://netty.io/wiki/forked-tomcat-native.html[netty-tcnative^] libraries: * OpenSSL @@ -26,13 +22,9 @@ configure the appropriate cipher suite(s). Check the https://netty.io/wiki/forked-tomcat-native.html[netty-tcnative^] page for installation details. 
-NOTE: If OpenSSL capabilities are detected -(also the appropriate Java libraries are included) and if no -explicit `SSLEngineFactory` is set, Hazelcast defaults to use OpenSSL. - == Netty Libraries -For the native TLS/SSL integration in Java, the https://netty.io/[Netty^] library is used. +For native TLS integration in Java, the https://netty.io/[Netty^] library is used. Make sure the following libraries from the Netty framework and their dependencies are on the classpath: @@ -41,11 +33,12 @@ Make sure the following libraries from the Netty framework and their dependencie ** either BoringSSL: `netty-tcnative-boringssl-static` ** or OpenSSL: `netty-tcnative` (for given OS architecture) -NOTE: It is very important that the version of Netty JAR(s) corresponds to -a very specific version of `netty-tcnative`. In case of doubt, the -simplest thing to do is to download the `netty-.tar.bz2` file +NOTE: The `netty-handler` and `tcnative` artifacts have different versioning strategies. +It is important that the these versions are compatible. The safe way +is to download the `netty-.tar.bz2` file from the https://netty.io/downloads.html[Netty^] website and check which -`netty-tcnative` version is used for that Netty release. +`netty-tcnative` version is used for that Netty release. Other possibility +is to use import `netty-bom` in Maven for dependency management. == Using BoringSSL @@ -56,16 +49,26 @@ Example Maven dependencies: [source,xml] ---- + + + + io.netty + netty-bom + ${netty.version} + import + pom + + + + io.netty netty-tcnative-boringssl-static - ${netty-tcnative.version} io.netty netty-handler - ${netty.version} ---- @@ -87,17 +90,27 @@ Example Maven dependencies (for Linux): [source,xml] ---- + + + + io.netty + netty-bom + ${netty.version} + import + pom + + + + io.netty netty-tcnative - ${netty-tcnative.version} linux-x86_64 io.netty netty-handler - ${netty.version} ---- @@ -105,7 +118,7 @@ Example Maven dependencies (for Linux): == Configuring Hazelcast for OpenSSL Configuring OpenSSL in Hazelcast is straight forward. On the client and/or -member side, the following snippet enables TLS/SSL +member side, the following snippet enables TLS using OpenSSL: [tabs] @@ -155,7 +168,7 @@ hazelcast: ---- ==== -The configuration is similar to a regular TLS/SSL integration. The main differences +The configuration is similar to a regular TLS integration. The main differences are the `OpenSSLEngineFactory` factory class and the following properties: * `keyFile`: Path of your PKCS#8 key file in PEM format. diff --git a/docs/modules/security/pages/jaas-authentication.adoc b/docs/modules/security/pages/jaas-authentication.adoc index e243208fa..23ba39063 100644 --- a/docs/modules/security/pages/jaas-authentication.adoc +++ b/docs/modules/security/pages/jaas-authentication.adoc @@ -1,8 +1,50 @@ -= JAAS authentication += JAAS Authentication [[jaas-authentication]] :page-enterprise: true -== JAAS Principals used in Hazelcast +The `jaas` authentication setting is the most flexible +form of authentication, but requires knowledge +of JAAS login modules and related concepts. You can use +custom login modules and order them in a login module stack. 
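If you implement your own module, it has to follow the standard `javax.security.auth.spi.LoginModule` contract; Hazelcast's bundled modules, such as the Kerberos one, extend the abstract `ClusterLoginModule`. A minimal, self-contained sketch of the plain JAAS contract looks like this (hypothetical class name; it accepts any non-empty user name, so it is for illustration only):

[source,java]
----
package com.examples;

import java.io.IOException;
import java.util.Map;

import javax.security.auth.Subject;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.LoginException;
import javax.security.auth.spi.LoginModule;

public class ExampleLoginModule implements LoginModule {

    private CallbackHandler callbackHandler;
    private String name;

    @Override
    public void initialize(Subject subject, CallbackHandler callbackHandler,
                           Map<String, ?> sharedState, Map<String, ?> options) {
        this.callbackHandler = callbackHandler;
    }

    @Override
    public boolean login() throws LoginException {
        NameCallback nameCallback = new NameCallback("name");
        try {
            callbackHandler.handle(new Callback[]{nameCallback});
        } catch (IOException | UnsupportedCallbackException e) {
            throw new LoginException("Unable to retrieve the login name: " + e.getMessage());
        }
        name = nameCallback.getName();
        if (name == null || name.isEmpty()) {
            throw new LoginException("No login name provided");
        }
        return true;
    }

    @Override
    public boolean commit() throws LoginException {
        // A production module would add HazelcastPrincipal implementations
        // (identity, role, endpoint) to the Subject at this point.
        return true;
    }

    @Override
    public boolean abort() throws LoginException {
        name = null;
        return true;
    }

    @Override
    public boolean logout() throws LoginException {
        name = null;
        return true;
    }
}
----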
+ +The following is a sample configuration which authenticates against an LDAP server or +database as a fallback: + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- +include::ROOT:example$/hazelcast-authentication-types.xml[tag=jaas] +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + - name: jaasRealm + authentication: + jaas: + - class-name: com.examples.LdapLoginModule + usage: SUFFICIENT + properties: + url: ldap://corp-ldap + - class-name: com.examples.DatabaseLoginModule + usage: SUFFICIENT + properties: + type: ora18 + host: corp-db + table: USERS +---- +==== + + +== JAAS principals used in Hazelcast Hazelcast works with the following JAAS https://docs.oracle.com/javase/8/docs/api/java/security/Principal.html[Principal^] implementations added to the https://docs.oracle.com/javase/8/docs/api/javax/security/auth/Subject.html[Subject^]: @@ -23,7 +65,7 @@ Set hazelcastPrincipals = subject.getPrincipals(HazelcastPrincipal.class); ---- -=== Callbacks Supported in Login Modules +== Callbacks Supported in Login Modules JAAS https://docs.oracle.com/javase/8/docs/api/javax/security/auth/callback/Callback.html[Callback^] instances are used for accessing different kinds of data from the diff --git a/docs/modules/security/pages/kerberos-authentication.adoc b/docs/modules/security/pages/kerberos-authentication.adoc new file mode 100644 index 000000000..f47b0a945 --- /dev/null +++ b/docs/modules/security/pages/kerberos-authentication.adoc @@ -0,0 +1,419 @@ += Kerberos Authentication +:page-enterprise: true + +The Kerberos authentication protocol is one of the standard solutions +for single sign-on (SSO). Hazelcast +supports Kerberos authentication as an {enterprise-product-name} feature and also +provides Kerberos integration to LDAP-based authorizations. + +The Kerberos support in Hazelcast has 2 configuration parts: identity +and authentication. The identity part is responsible for retrieving the service +ticket from Kerberos KDC (Key Distribution Center). +The authentication part verifies the service tickets. + +Default Service principal names for Hazelcast members are in the form +`hz/address@REALM`, for example `hz/192.168.1.1@ACME.COM`. + +Before a service ticket is issued, the client side of the connection has to be +authenticated, which means the TGT (Ticket Granting Ticket) is present in the Subject. + +== Simplified Kerberos configuration + +Both Hazelcast `kerberos` identity, and `kerberos` authentication delegate the ticket related tasks (such as TGT retrieval) to vendor-specific `Krb5LoginModule` +implementations. It normally uses the https://docs.oracle.com/en/java/javase/17/docs/api/jdk.security.auth/com/sun/security/auth/module/Krb5LoginModule.html[`com.sun.security.auth.module.Krb5LoginModule`] class. +The `security-ream` property in `kerberos` configurations allows referencing another realm with `Krb5LoginModule` configured. + +To simplify the Kerberos configuration process for new users, Hazelcast allows +skipping `Krb5LoginModule` JAAS configuration within separate security realms. +Instead, it's possible to define the `principal` and `keytab-file` options in the +`kerberos` identity and authentication configurations. +If these options are used instead of the `security-realm`, then a new temporary +realm is generated on the fly during authentication. 
+ +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + + + hz/127.0.0.1@HAZELCAST.COM + /opt/localhost.keytab + + + + + HAZELCAST.COM + hz/127.0.0.1@HAZELCAST.COM + /opt/localhost.keytab + + + +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + - name: simpleKerberosRealm + authentication: + kerberos: + principal: hz/127.0.0.1@HAZELCAST.COM + keytab-file: /opt/localhost.keytab + identity: + kerberos: + realm: HAZELCAST.COM + principal: hz/127.0.0.1@HAZELCAST.COM + keytab-file: /opt/localhost.keytab +---- +==== + +A warning is logged during the first use of the simplified configuration form. +It includes the generated configuration, so you can use it as a starting point +to define the full Kerberos configuration. An example warning log is shown below: + +``` +12:37:41,187 WARN [KerberosCredentialsFactory] Using generated Kerberos initiator +realm configuration is not intended for production use. It's recommended +to properly configure the Krb5LoginModule manually to fit your needs. +Following configuration was generated from provided keytab and principal properties: + + + + + + true + true + true + true + true + /opt/localhost.keytab + hz/127.0.0.1@HAZELCAST.COM + + + + + +``` + +== Identity configuration + +The full Kerberos identity configuration references a security realm with `Krb5LoginModule` configured as an initiator: + +[tabs] +==== +Sample Kerberos Identity Configuration XML:: ++ +-- + +[source,xml] +---- + + + + ACME.COM + krb5Initiator + + + + + + + + + true + true + + + + + +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + - name: kerberosRealm + identity: + kerberos: + realm: ACME.COM + security-realm: krb5Initiator + - name: krb5Initiator + authentication: + jaas: + class-name: com.sun.security.auth.module.Krb5LoginModule + properties: + useTicketCache: true + doNotPrompt: true +---- +==== + +The `` identity configuration has the following properties: + +[cols="1,1,3",options="header",] +.The Identity Configuration Options +|======================================================================= +| Property name +| Default value +| Description + +| `spn` +| +| Allows configuring static Service Principal Name (SPN). It's +meant for use cases where all the members share a single Kerberos identity. + +| `service-name-prefix` +| `"hz/"` +| Defines the prefix of SPN. By default the member's +principal name (for which this credentials +factory asks the service ticket) is in the form +`"[servicePrefix][memberIpAddress]@[REALM]"`, e.g., +`"hz/192.168.1.1@ACME.COM"`. + +| `realm` +| +| Kerberos realm name, e.g., `"ACME.COM"`. + +| `security-realm` +| +| Security realm name in the Hazelcast configuration used +for Kerberos authentication. The authentication configuration in the +referenced security realm will be used to fill the Subject with the Kerberos +credentials, e.g. TGT. + +| `use-canonical-hostname` +| `false` +| Flag which controls if canonical hostnames should be used instead of IP addresses +in generated Service Principal names. +This property is only used when the Service Principal name is not static, i.e. when `spn` option +isn't configured). + +| `principal` +| +| Kerberos principal name. This is a helper option which can be used together +with the `keytab-file` to replace the `security-realm` configuration. + +_We don't recommend using this property in production!_ + +| `keytab-file` +| +| Path to a keytab file with the current principal's secrets. 
+This is a helper option which can be used together +with the `principal` to replace the `security-realm` configuration. + +_We don't recommend using this property in production!_ +|======================================================================= + + +== Kerberos authentication + +The authenticating part on the server side is able to +accept Kerberos tickets and verify them. The Kerberos +authentication is delegated to another realm with the Kerberos login module +configured. + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + + + krb5Acceptor + + + + + + + + + false + false + true + true + true + hz/192.168.1.1@ACME.COM + /opt/member1.keytab + + + + + +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + name: kerberosRealm + authentication: + kerberos: + security-realm: krb5Acceptor + name: krb5Acceptor + authentication: + jaas: + - class-name: com.sun.security.auth.module.Krb5LoginModule + usage: REQUIRED + properties: + isInitiator: false + useTicketCache: false + doNotPrompt: true + useKeyTab: true + storeKey: true + principal: hz/192.168.1.1@ACME.COM + keyTab: /opt/member1.keytab +---- +==== + +The `krb5Acceptor` realm configuration in the snippet only loads the Kerberos secrets from +a keytab file and it doesn't authenticate against a KDC. + +[cols="1,1,3",options="header",] +.kerberos> authentication configuration options +|======================================================================= +| Property name +| Default value +| Description + +| `relax-flags-check` +| `false` +| Allows disabling some of the checks on the +incoming token, e.g. passes authentication even if the mutual +authentication is required by the token. + +| `use-name-without-realm` +| `false` +| When set to `true`, then the Kerberos realm part is removed from the +authenticated name, e.g. `"jduke@ACME.COM"` becomes just `"jduke"`. + +| `security-realm` +| +|Security realm name in the Hazelcast configuration used +for Kerberos authentication. The authentication configuration in the +referenced security realm will be used to fill the Subject with the Kerberos +credentials, e.g. Keytab. + +| `principal` +| +| Kerberos principal name. This is a helper option which can be used together +with the `keytab-file` to replace the `security-realm` configuration. + +_We don't recommend using this property in production!_ + +| `keytab-file` +| +| Path to a keytab file with the current principal's secrets. +This is a helper option which can be used together +with the `principal` to replace the `security-realm` configuration. + +_We don't recommend using this property in production!_ +|======================================================================= + +The `GssApiLoginModule` (implementing Kerberos authentication) +derives from the abstract `ClusterLoginModule`. As a result the `` +configuration supports the common options, too: `skip-identity`, `skip-endpoint` and +`skip-role`. + +[NOTE] +==== +* Kerberos authentication in Hazelcast is only able to validate connections on +the server side. It doesn't support mutual authentication. +* The Generic Security Services API (GSS-API) isn't used for protecting (wrapping) +the messages after authentication, e.g. encryption, integrity checks. It's only used for +accepting tokens. +* The token itself is not protected against Man-in-the-Middle (MITM) attacks. +If an attacker is able to eavesdrop the token and use it before the +original sender, then the attacker succeeds with the authentication but +the original sender won't. 
+** There is a replay protection in Java which caches the already used tokens. +** Java Kerberos implementation accepts the token for 5 minutes (by default) +from its creation. +* Time has to be synchronized on machines where Kerberos is +used. + +If you are running Hazelcast in an untrusted network with a MITM attack +risk, then enable encryption on Hazelcast protocols to prevent stealing +the token. +==== + +=== Kerberos and LDAP integration + +Kerberos authentication allows loading role mapping information from +an LDAP server (usually the one backing the Kerberos KDC server, too). +Therefore, the `` authentication configuration is also available as +a sub-configuration of `` authentication. + +[tabs] +==== +Sample Kerberos Identity Configuration XML:: ++ +-- + +[source,xml] +---- + + + + true + krb5Acceptor + + ldap://ldap.hazelcast.com + GSSAPI + memberOf + krb5Initiator + (krb5PrincipalName=\{login}) + true + + + + +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + - name: kerberosRealm + authentication: + kerberos: + skip-role: true + security-realm: krb5Acceptor + ldap: + url: ldap://ldap.hazelcast.com + system-authentication: GSSAPI + security-realm: krb5Initiator + skip-authentication: true + user-filter: "(krb5PrincipalName=\{login})" + role-mapping-attribute: memberOf +---- +==== + +NOTE: The Kerberos LDAP integration doesn't support credential delegation, +i.e. reusing client tickets for accessing the LDAP. It only allows using +the member's Kerberos credentials to authenticate into LDAP. + + +== Troubleshooting + +Usually `Krb5LoginModule` implementations provided by JVMs have a `debug` option allowing you to print details related to authentication. Please refer your JVM documentation to find more details or see xref:security-debugging.adoc[Security debugging] to find out +how to increase debug output for Kerberos in your JVM. + +canceled \ No newline at end of file diff --git a/docs/modules/security/pages/ldap-authentication.adoc b/docs/modules/security/pages/ldap-authentication.adoc new file mode 100644 index 000000000..eb2f8e64b --- /dev/null +++ b/docs/modules/security/pages/ldap-authentication.adoc @@ -0,0 +1,285 @@ += LDAP Authentication +:page-enterprise: true + +NOTE: A Lightweight Directory Access Protocol (LDAP) server is a specialized server that stores and manages directory information in a hierarchical structure. It's commonly used for centralized authentication and authorization, storing information like user credentials, groups, and permissions. Applications and systems query the LDAP server to retrieve or modify this information, often to authenticate users or manage access rights. It's widely used in environments like enterprise networks for user management and directory services. + +Hazelcast supports authentication and authorization against LDAP servers. +Authentication verifies the provided name and password, and authorization maps roles to the authenticated user. + +You can verify passwords during authentication by: + +* making a new LDAP bind operation with the given name and password +* using a separate "admin connection" to verify the provided password +against an LDAP object attribute. + +There are several ways to retrieve role mappings: + +* `attribute`: The role name is stored as an attribute in the object representing the identity. +* `direct` mapping: The identity object contains an attribute with reference to the role object(s). +* `reverse` mapping: The role objects with a reference to the identity object are searched. 
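For example, a realm that verifies the password with an LDAP bind and uses `direct` role mapping could be configured as follows (a minimal YAML sketch with a hypothetical server URL and attribute values; each option is described in the table below):

[source,yaml]
----
realms:
  - name: ldapRealm
    authentication:
      ldap:
        url: ldap://ldap.example.com
        role-mapping-mode: direct
        role-mapping-attribute: memberOf
        role-name-attribute: cn
        user-name-attribute: uid
----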
+ +The `direct` and `reverse` mapping modes allow a role search recursion. + +[cols="1,1,3"] +.LDAP Configuration Options +|=== +| Option Name +| Default Value +| Description + +| `url` +| +| URL of the LDAP server. The value is configured as the JNDI environment +property, i.e. `java.naming.provider.url`. + +| `socket-factory-class-name` +| +| Socket factory class name. The factory can be used for fine-grained +configuration of the TLS protocol on top of the LDAP protocol, i.e. `ldaps` scheme. + +| `parse-dn` +| false +| If set to `true`, it treats the value of `role-mapping-attribute` as a DN and +extracts only the `role-name-attribute` values as role names. If set to `false`, +the whole value of `role-mapping-attribute` is used as a role name. + +This option is only used when the `role-mapping-mode` option has the value `attribute`. + +| `role-context` +| +| LDAP Context in which assigned roles are searched, e.g. `ou=Roles,dc=hazelcast,dc=com`. + +This option is only used when the `role-mapping-mode` option has the value `reverse`. + +| `role-filter` +| `([role-mapping-attribute]=\{MEMBERDN})` +| LDAP search string which usually contains a placeholder `\{MEMBERDN}` to be +replaced by the provided login name, e.g. `(member=\{MEMBERDN})`. + +If the role search recursion is enabled (see `role-recursion-max-depth`), the `\{MEMBERDN}` +is replaced by role DNs in the recurrent searches. + +This option is only used when the `role-mapping-mode` option has the value `reverse`. + +| `role-mapping-attribute` +| +| Name of the LDAP attribute which contains either the role name or role DN. + +This option is used when the `role-mapping-mode` option has the value `attribute` or `direct`. +If the mapping mode is `reverse`, the value is used in `role-filter` default value. + +| `role-mapping-mode` +| `attribute` +a| Role mapping mode. It can have one of the following values: + +* `attribute`: The user object in the LDAP contains the role name in the +given attribute. The role name can be parsed from a DN string when `parse-dn=true` +No additional LDAP query is done to find assigned roles. +* `direct`: The user object contains an attribute with DN(s) of assigned +role(s). Role objects are loaded from the LDAP and the role name is +retrieved from its attributes. Role search recursion can be enabled for this mode. +* `reverse`: The role objects are located by executing an LDAP search query +with the given `role-filter`. In this case, the role object usually contains +attributes with DNs of the assigned users. Role search recursion can be enabled for this mode. + +| `role-name-attribute` +| +| This option may refer to a name of LDAP attribute within the role object which +contains the role name in case of `direct` and `reverse` role mapping mode. It may also refer +to the attribute name within X.500 name stored in `role-mapping-attribute` when +`role-mapping-mode=attribute` and `parse-dn=true`. + +| `role-recursion-max-depth` +| 1 +| Sets the maximum depth of role search recursion. The default value 1 means +the role search recursion is disabled. + +This option is only used when the `role-mapping-mode` option has a `direct` or `reverse` value. + +| `role-search-scope` +| `subtree` +a| LDAP search scope used for `role-filter` search. It can have one of the following values: + +* `subtree`: Searches for objects in the given context and its subtree. +* `one-level`: Searches just one level under the given context. 
+* `object`: Searches (or tests) only for the context object itself (if it matches the filter criteria). + +This option is only used when the `role-mapping-mode` option has the value `reverse`. + +| `user-name-attribute` +| `uid` +| LDAP attribute name whose value is used as a name in +`ClusterIdentityPrincipal` added to the JAAS Subject. + +| `system-user-dn` +| +a| Admin account DN. If configured, then the following are true: + +* For the user and role object, search queries are used an admin connection instead +of the "user" one created by LDAP bind with provided credentials. +* LDAP authentication doesn't expect the full user DN to be provided as a login name. +It rather expects names like `"jduke"` than `"uid=jduke,ou=Engineering,o=Hazelcast,dc=com"`; +* The admin connection allows verifying the provided user credentials against a +value defined in the `password-attribute` option. + +| `system-user-password` +| +| Admin's password (for `system-user-dn` account). + + +| `system-authentication` +| `simple` +| Name of the authentication mechanism used for the admin LDAP connection. +It's used as a value for the JNDI environment property `Context#SECURITY_AUTHENTICATION`. +You can specify `GSSAPI` to authenticate with the Kerberos protocol. + +| `password-attribute` +| +| Credentials verification is done by the new LDAP binds by default. +However, the password can be stored in a non-default LDAP attribute; in this case use `password-attribute` to configure which +LDAP attribute (within the user object) contains the password. If the `password-attribute` option is provided, +then the extra LDAP bind to verify credentials is not done and passwords +are just compared within the Hazelcast code after retrieving the user object from LDAP server. + +This option is only used when the admin connection is configured, i.e. when `system-user-dn` or `system-authentication` is defined. + +| `user-context` +| +| LDAP context in which the user objects are searched, e.g., `ou=Users,dc=hazelcast,dc=com`. + +This option is only used when the admin connection is configured, i.e. when `system-user-dn` or `system-authentication` is defined. + +| `user-filter` +| `(uid=\{login})` +| LDAP search string for retrieving the user objects based on the provided login name. +It usually contains a placeholder substring `\{login}` which is replaced by the provided login name. + +This option is only used when the admin connection is configured, i.e. when `system-user-dn` or `system-authentication` is defined. + +| `user-search-scope` +| `subtree` +a| LDAP search scope used for `user-filter` search. It can have one of the following values: + +* `subtree`: Searches for objects in the given context and its subtree. +* `one-level`: Searches just one-level under the given context. +* `object`: Searches (or tests) just for the context object itself (if it matches the filter criteria). + +This option is only used when the admin connection is configured, i.e. when `system-user-dn` or `system-authentication` is defined. + +| `skip-authentication` +| `false` +a| Flag which disables password verification and instead adds `HazelcastPrincipal` instances to the +Subject. + +This option is only used when the admin connection is configured, i.e. when `system-user-dn` or `system-authentication` is defined. + +| `security-realm` +| +a| If specified, the given realm name is used for authentication of +a (temporary) Subject which is then used for doing LDAP queries. + +This option is only used when the admin connection is configured, i.e. 
when `system-user-dn` or `system-authentication` is defined. +|=== + +Detailed logging for LDAP authentication can be enabled by +configuring a more verbose logger level for the `com.hazelcast.security` +package as described in the xref:security-debugging.adoc[Security Debugging section]. + +NOTE: The LDAP authentication implementation provided by Hazelcast doesn't handle LDAP referrals, i.e. references to other LDAP trees. + +== TLS protected LDAP server connections + +The LDAP authentication type supports TLS protected connections +to LDAP servers, using the `ldaps` protocol scheme. TLS is +handled on the Java runtime side (JNDI API and URL handlers). + +When using TLS, by default the LDAP provider uses the socket factory `javax.net.ssl.SSLSocketFactory` to create a TLS socket to communicate +with the server, using the default JSSE configuration. By default, the server's +certificate is validated against Java default CA certificate store, and the hostname +in the LDAP URL is verified against the name(s) in the server certificate. This behavior +can be controlled globally by using `javax.net.ssl.*` properties, as the following example shows: + +[source,shell] +---- +java -Djavax.net.ssl.trustStore=/opt/hazelcast.truststore \ + -Djavax.net.ssl.trustStorePassword=123456 \ + -Djavax.net.ssl.keyStore=/opt/hazelcast.keystore \ + -Djavax.net.ssl.keyStorePassword=123456 \ + ... +---- + +There can be also properties specific to vendor or Java version that enable more +fine-grained control. Here is an example that disabls host name validation: + +[source,shell] +---- +-Dcom.sun.jndi.ldap.object.disableEndpointIdentification=true +---- + +When even more control is necessary, you can implement your own +`SSLSocketFactory` and use its class name as the value in the `ldap` +authentication option `socket-factory-class-name`. + +Here is an example custom socket factory class: + +[source,java] +---- +include::ROOT:example$/security/ldap/CustomSSLSocketFactory.java[] +---- + +The following example shows a possible authentication configuration: + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- +include::ROOT:example$/hazelcast-authentication-types.xml[tag=ldaps] +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + - name: ldapsRealm + authentication: + ldap: + url: ldaps://ldapserver.acme.com + socket-factory-class-name: security.ldap.CustomSSLSocketFactory + role-mapping-attribute: cn +---- +==== + +LDAP authentication is backed by the JNDI API in Java and also has failover support. You can configure multiple space-separated +URLs in the `` option: + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- +include::ROOT:example$/hazelcast-authentication-types.xml[tag=ldap-fallback] +---- +-- + +YAML:: ++ +[source,yaml] +---- + realms: + - name: ldapFallbackRealm + authentication: + ldap: + url: ldap://ldap-master.example.com ldap://ldap-backup.example.com +---- +==== + +LDAP can also be used for role retrieval when xref:kerberos-authentication.adoc[Kerberos authentication] is used. diff --git a/docs/modules/security/pages/overview.adoc b/docs/modules/security/pages/overview.adoc index ec6ddd7db..7ef390fa1 100644 --- a/docs/modules/security/pages/overview.adoc +++ b/docs/modules/security/pages/overview.adoc @@ -1,11 +1,11 @@ = Security Overview +:page-aliases: cluster-member-security.adoc :page-enterprise: true This section provides an introduction to the security features of Hazelcast. 
-These features allow you to perform security activities, such as intercepting socket -connections and remote operations executed by the clients, encrypting the communications -between the members at socket level and using SSL socket communication. -All the security features explained in this chapter are the features of +These features allow you to perform security activities, such as encrypting network communication using TLS, +controlling access permissions of clients, or logging auditable events. +The security features explained in this chapter are the features of [blue]#Hazelcast {enterprise-product-name}# edition. While Hazelcast supports non-secured cluster members and clients, @@ -13,55 +13,66 @@ it is recommended to secure your deployments. A cluster without security may fac * unauthorized cluster members joining or accessing it * unwanted or malicious clients accessing it - * unauthorized use (access or creation) of cluster resources and data tampering by the malicious cluster members and clients. -And when using Hazelcast's Jet streaming engine, notice the following security considerations: - -* Hazelcast jobs allow you to use your custom codes and these codes must be available on -cluster classpath or deployed to the cluster; this means any client is able to deploy -custom codes to the cluster, so make sure each client is authorized to access the cluster. -* The Jet engine bypasses the access control layer when accessing the data structures in the same cluster. -* The connectors of the Jet engine include 3rd party codes which may increase the attack surface. -* SQL, which is used by the Jet engine, includes file connectors and it can read files on the cluster filesystem. - -Due to the above considerations, Hazelcast's streaming engine is disabled by default for our users who -mostly use Hazelcast's storage engine (formerly known as Hazelcast IMDG) with the JAR distribution -(See the xref:secure-cluster:security-defaults.adoc[Security Defaults section] for information about -the security considerations for different Hazelcast distributions). -xref:configuration:jet-configuration.adoc[Enabling the Jet Engine section] shows how you can -start using the Jet engine; relatedly, see the xref:secure-cluster:hardening-recommendations.adoc[Security Hardening Recommendations section] -to learn the best practices to secure your cluster. - Below, you can see the brief descriptions of Hazelcast's security features. You can evaluate them and decide which ones you want to use based on your security concerns and requirements. For data privacy: -* xref:security:tls-ssl.adoc[TLS/SSL] communication for members and clients for all socket-level communication; +* xref:security:tls-ssl.adoc[TLS] communication for members and clients for all socket-level communication; uses key stores and trust stores to encrypt communications across a Hazelcast cluster, -as well as between the clusters replicated over WAN. You can also configure -xref:security:tls-configuration.adoc#configuring-cipher-suites[cipher suites] to secure the network communication. +as well as between the clusters replicated over WAN. For authentication: -* xref:security:jaas-authentication.adoc[JAAS-based authentication] between -the cluster members and for pluggable identity verifications; -works with identity, role and endpoint principal implementations. 
-* xref:security:socket-interceptor.adoc[Socket Interceptor] to interfere socket connections -before a new member or client comes to the cluster; you can perform identity checking using custom -authentication protocols. +* xref:security:authentication-overview.adoc[authentication] between the cluster members, and for clients accessing the cluster. +Security Realms are used for authentication and identity configurations; * xref:security:tls-ssl.adoc#mutual-authentication[TLS Mutual Authentication] to ensure each TLS-communicating side proves its identity to the other. -* xref:security:security-realms.adoc[Security Realms] for authentication and identity configurations. For authorization: -* xref:security:native-client-security.adoc#authorization[JAAS-based authorization] using +* xref:security:client-authorization.adoc#authorization[authorization] using permission policies for role-based security. + +Hazelcast has a pluggable security component architecture allowing use your own code to control security: + +* pluggable xref:security:jaas-authentication.adoc[authentication] and xref:security:identity-configuration#credentials-factory[identity]; +* xref:security:socket-interceptor.adoc[Socket Interceptor] to interfere socket connections +before a new member or client comes to the cluster; you can perform identity checking using custom +authentication protocols. * xref:security:security-interceptor.adoc[Security Interceptor] that provides a callback -point for every operation executed against the cluster. +point for client operations executed against the cluster. See also xref:secure-cluster:hardening-recommendations.adoc[Security Hardening Recommendations section] -to learn more about the best security practices. \ No newline at end of file +to learn more about the best security practices. + +Security-related areas that are covered in other sections of the documentation include: + +* Network and Advanced Network configuration allow specifying bind interfaces; +* Advanced Network configuration allows separating socket addresses for different protocols; +* Management operations can be limited to specific IP addresses where Management Center is allowed to run; +* Untrusted deserialization protection allows control of which types are allowed in Java native deserialization; + +Hazelcast distributions contain security-hardened example configuration files that help users to review configuration sections related to deployment security. + +== Hazelcast Jet considerations + +When using Hazelcast's Jet streaming engine, notice the following security considerations: + +* Hazelcast jobs allow you to use your custom code and this code must be available on +cluster classpath or deployed to the cluster; this means any client is able to deploy +custom code to the cluster, so make sure each client is authorized to access the cluster. +* The Jet engine bypasses the access control layer when accessing the data structures in the same cluster. +* The connectors of the Jet engine include third-party code which may increase the attack surface. +* SQL, which is used by the Jet engine, includes file connectors and it can read files on the cluster filesystem. 
+ +Due to the above considerations, access to Hazelcast's streaming engine is disabled by default for our users who +mostly use Hazelcast's storage engine (formerly known as Hazelcast IMDG) with the JAR distribution +(See the xref:secure-cluster:security-defaults.adoc[Security Defaults section] for information about +the security considerations for different Hazelcast distributions). +xref:configuration:jet-configuration.adoc[Enabling the Jet Engine section] shows how you can +start using the Jet engine; relatedly, see the xref:secure-cluster:hardening-recommendations.adoc[Security Hardening Recommendations section] +to learn the best practices to secure your cluster. diff --git a/docs/modules/security/pages/security-debugging.adoc b/docs/modules/security/pages/security-debugging.adoc index d4260f814..f1407266d 100644 --- a/docs/modules/security/pages/security-debugging.adoc +++ b/docs/modules/security/pages/security-debugging.adoc @@ -28,19 +28,28 @@ information by using the following system property: ``` See the -https://docs.oracle.com/javase/8/docs/technotes/guides/security/troubleshooting-security.html[Troubleshooting Security^] +https://docs.oracle.com/en/java/javase/17/security/troubleshooting-security.html[Troubleshooting Security^] Java guide for more information. == TLS debugging -To assist with the TLS/SSL issues, you can use the following +To assist with TLS issues, you can use the following system property: ``` -Djavax.net.debug=all ``` -This property provides a lot of logging output including the TLS/SSL +This property provides a lot of logging output including the TLS handshake, that can be used to determine the cause of the problem. See the -http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/ReadDebug.html[Debugging TSL/SSL Connections^] -guide for more information. \ No newline at end of file +http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/ReadDebug.html[Debugging TSL Connections^] +guide for more information. + +== Kerberos debugging + +In addition to the `debug` option available in `Krb5LoginModule` implementations, there are +Java system properties that can help you identify issues by printing more output related to Kerberos authentication: + +``` +-Dsun.security.krb5.debug=true -Dsun.security.jgss.debug=true -Dcom.ibm.security.krb5.Krb5Debug=all -Dcom.ibm.security.jgss.debug=all +``` diff --git a/docs/modules/security/pages/security-realms.adoc b/docs/modules/security/pages/security-realms.adoc deleted file mode 100644 index e5bf3b6b2..000000000 --- a/docs/modules/security/pages/security-realms.adoc +++ /dev/null @@ -1,1104 +0,0 @@ -= Security Realms -:page-enterprise: true - -Security realms allow configuring JAAS authentication and/or own identity -independently on the module which consumes this configuration. -The realm is a named configuration and other modules just reference it by name. 
- -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-security-realms.xml[tag=realms] ----- --- - -YAML:: -+ -[source,yaml] ----- - security: - enabled: true - realms: - - name: realm1 - authentication: - jaas: - - class-name: com.hazelcast.examples.MyRequiredLoginModule - usage: REQUIRED - properties: - property: value - identity: - credentials-factory: - class-name: com.hazelcast.examples.MyCredentialsFactory - properties: - property: value - member-authentication: - realm: realm1 - client-authentication: - realm: realm1 ----- -==== - -== Authentication Configuration - -There are several types of authentication configuration available in a security realm. -The realm cannot have more than one authentication method specified. - -The following are the available authentication types: - -* `jaas`: Defines JAAS login module stacks. -* `ldap`: Verifies `PasswordCredentials` against an LDAP server. -* `kerberos`: Verifies the Kerberos token provided in `TokenCredentials`. -* `tls`: Verifies that the TLS mutual authentication was used -in the incoming connection and the peer's certificate chain is available. - -=== JAAS Authentication Type - -The `jaas` authentication setting is the most flexible -form of authentication, but it requires knowledge -of JAAS login modules and related concepts. It allows using -custom login modules and ordering them in a login module stack. - -The following is a sample configuration which authenticates against an LDAP server or -database as a fallback: - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-authentication-types.xml[tag=jaas] ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - - name: jaasRealm - authentication: - jaas: - - class-name: com.examples.LdapLoginModule - usage: SUFFICIENT - properties: - url: ldap://corp-ldap - - class-name: com.examples.DatabaseLoginModule - usage: SUFFICIENT - properties: - type: ora18 - host: corp-db - table: USERS ----- -==== - -For more details, see the xref:jaas-authentication.adoc[JAAS authentication section]. - -=== LDAP Authentication Type - -LDAP servers are one of the most popular identity stores. -They can track information about organization structure, -users, groups, servers and configurations. - -Hazelcast supports authentication and authorization against LDAP servers. -The authentication verifies the provided name and password. -The authorization part allows to map roles to the authenticated user. - -The password verification during the authentication is possible by: - -* making a new LDAP bind operation with the given name and password -* using a separate "admin connection" to verify the provided password -against an LDAP object attribute. - -The LDAP authentication allows also a role mapping. -As there are more ways how roles can be mapped in the LDAP, -Hazelcast provides several approaches to retrieve them: - -* `attribute`: The role name is stored as an attribute in the object representing the identity. -* `direct` mapping: The identity object contains an attribute with reference to the role object(s). -* `reverse` mapping: The role objects having a reference to the identity object are searched. - -The `direct` and `reverse` mapping modes also allow a role search recursion. - -[cols="1,1,3"] -.LDAP Configuration Options -|=== -| Option Name -| Default Value -| Description - -| `url` -| -| URL of the LDAP server. The value is configured as the JNDI environment -property, i.e., `java.naming.provider.url`. 
- -| `socket-factory-class-name` -| -| Socket factory class name. The factory can be used for fine-grained -configuration of the TLS protocol on top of the LDAP protocol, i.e., `ldaps` scheme. - -| `parse-dn` -| false -| If set to `true`, it treats the value of `role-mapping-attribute` as a DN and -extracts only the `role-name-attribute` values as role names. If set to `false`, -the whole value of `role-mapping-attribute` is used as a role name. - -This option is only used when the `role-mapping-mode` option has the value `attribute`. - -| `role-context` -| -| LDAP Context in which assigned roles are searched, e.g., `ou=Roles,dc=hazelcast,dc=com`. - -This option is only used when the `role-mapping-mode` option has the value `reverse`. - -| `role-filter` -| `([role-mapping-attribute]=\{MEMBERDN})` -| LDAP search string which usually contains a placeholder `\{MEMBERDN}` to be -replaced by the provided login name, e.g., `(member=\{MEMBERDN})`. - -If the role search recursion is enabled (see `role-recursion-max-depth`), the `\{MEMBERDN}` -is replaced by role DNs in the recurrent searches. - -This option is only used when the `role-mapping-mode` option has the value `reverse`. - -| `role-mapping-attribute` -| -| Name of the LDAP attribute which contains either the role name or role DN. - -This option is used when the `role-mapping-mode` option has the value `attribute` or `direct`. -If the mapping mode is `reverse`, the value is used in `role-filter` default value. - -| `role-mapping-mode` -| `attribute` -a| Role mapping mode. It can have one of the following values: - -* `attribute`: The user object in the LDAP contains directly role name in the -given attribute. Role name can be parsed from a DN string when `parse-dn=true` -No additional LDAP query is done to find assigned roles. -* `direct`: The user object contains an attribute with DN(s) of assigned -role(s). Role object(s) is/are loaded from the LDAP and the role name is -retrieved from its attributes. Role search recursion can be enabled for this mode. -* `reverse`: The role objects are located by executing an LDAP search query -with the given `role-filter`. In this case, the role object usually contains -attributes with DNs of the assigned users. Role search recursion can be enabled for this mode. - -| `role-name-attribute` -| -| This option may refer to a name of LDAP attribute within the role object which -contains the role name in case of `direct` and `reverse` role mapping mode. It may also refer -to the attribute name within X.500 name stored in `role-mapping-attribute` when -`role-mapping-mode=attribute` and `parse-dn=true`. - -| `role-recursion-max-depth` -| 1 -| Sets the maximum depth of role search recursion. The default value 1 means -the role search recursion is disabled. - -This option is only used when the `role-mapping-mode` option has value `direct` or `reverse`. - -| `role-search-scope` -| `subtree` -a| LDAP search scope used for `role-filter` search. It can have one of the following values: - -* `subtree`: Searches for objects in the given context and its subtree. -* `one-level`: Searches just one-level under the given context. -* `object`: Searches (or tests) just for the context object itself (if it matches the filter criteria). - -This option is only used when the `role-mapping-mode` option has the value `reverse`. - -| `user-name-attribute` -| `uid` -| LDAP attribute name whose value is used as a name in -`ClusterIdentityPrincipal` added to the JAAS Subject. - -| `system-user-dn` -| -a| Admin account DN. 
If configured, then the following are true: - -* For the user and role object, search queries are used an admin connection instead -of the "user" one created by LDAP bind with provided credentials. -* LDAP authentication doesn't expect the full user DN to be provided as a login name. -It rather expects names like `"jduke"` than `"uid=jduke,ou=Engineering,o=Hazelcast,dc=com"`; -* The admin connection allows verifying the provided user credentials against a -value defined in the `password-attribute` option. - -| `system-user-password` -| -| Admin's password (for `system-user-dn` account). - - -| `system-authentication` -| `simple` -| Name of the authentication mechanism used for the admin LDAP connection. -It's used as a value for JNDI environment property `Context#SECURITY_AUTHENTICATION`. -You can specify `GSSAPI` to authenticate with the Kerberos protocol. - -| `password-attribute` -| -| Credentials verification is done by the new LDAP binds by default. -Nevertheless, the password can be stored in a non-default LDAP attribute, -and in this case use `password-attribute` to configure against which -LDAP attribute (within the user object) is the provided password compared -during the login. As a result, if the `password-attribute` option is provided, -then the extra LDAP bind to verify credentials is not done and passwords -are just compared within the Hazelcast code after retrieving the user object from LDAP server. - -This option is only used when the admin connection is configured, i.e., when `system-user-dn` or `system-authentication` is defined. - -| `user-context` -| -| LDAP context in which the user objects are searched, e.g., `ou=Users,dc=hazelcast,dc=com`. - -This option is only used when the admin connection is configured, i.e., when `system-user-dn` or `system-authentication` is defined. - -| `user-filter` -| `(uid=\{login})` -| LDAP search string for retrieving the user objects based on the provided login name. -It usually contains a placeholder substring `\{login}` which is replaced by the provided login name. - -This option is only used when the admin connection is configured, i.e., when `system-user-dn` or `system-authentication` is defined. - -| `user-search-scope` -| `subtree` -a| LDAP search scope used for `user-filter` search. It can have one of the following values: - -* `subtree`: Searches for objects in the given context and its subtree. -* `one-level`: Searches just one-level under the given context. -* `object`: Searches (or tests) just for the context object itself (if it matches the filter criteria). - -This option is only used when the admin connection is configured, i.e., when `system-user-dn` or `system-authentication` is defined. - -| `skip-authentication` -| `false` -a| Flag which allows disabling password verification and -only takes care about filling `HazelcastPrincipal` instances into the -Subject. - -This option is only used when the admin connection is configured, i.e., when `system-user-dn` or `system-authentication` is defined. - -| `security-realm` -| -a| If specified, given realm name is used for authentication of -a (temporary) Subject which is then used for doing LDAP queries. - -This option is only used when the admin connection is configured, i.e., when `system-user-dn` or `system-authentication` is defined. -|=== - -Detailed logging for LDAP authentication can be enabled by -configuring a more verbose logger level for the `com.hazelcast.security` -package as described in the xref:security-debugging.adoc[Security Debugging section]. 
- -NOTE: The LDAP authentication implementation provided by Hazelcast doesn't handle LDAP referrals, i.e., references to other LDAP trees. - -==== TLS Protected LDAP Server Connections - -The LDAP authentication type supports TLS protected connections -to LDAP servers, using the `ldaps` protocol scheme. The TLS is -handled on the Java runtime side (JNDI API and URL handlers). - -When using TLS, the LDAP provider will, by default, use the socket factory, -`javax.net.ssl.SSLSocketFactory` for creating a TLS socket to communicate -with the server, using the default JSSE configuration. By default, the server's -certificate is validated against Java default CA certificate store and hostname -in LDAPs URL is verified against the name(s) in the server certificate. The behavior -can be controlled globally by using `javax.net.ssl.*` properties. Here is an example: - -[source,shell] ----- -java -Djavax.net.ssl.trustStore=/opt/hazelcast.truststore \ - -Djavax.net.ssl.trustStorePassword=123456 \ - -Djavax.net.ssl.keyStore=/opt/hazelcast.keystore \ - -Djavax.net.ssl.keyStorePassword=123456 \ - ... ----- - -There can be also properties specific to vendor or Java version allowing more -fine-grained control. Here is an example on disabling host name validation: - -[source,shell] ----- --Dcom.sun.jndi.ldap.object.disableEndpointIdentification=true ----- - -When even more control is necessary, you can implement your own -`SSLSocketFactory` and use its class name as the value in the `ldap` -authentication option `socket-factory-class-name`. - -Here is an example custom socket factory class: - -[source,java] ----- -include::ROOT:example$/security/ldap/CustomSSLSocketFactory.java[] ----- - -The authentication configuration could look like as follows: - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-authentication-types.xml[tag=ldaps] ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - - name: ldapsRealm - authentication: - ldap: - url: ldaps://ldapserver.acme.com - socket-factory-class-name: security.ldap.CustomSSLSocketFactory - role-mapping-attribute: cn ----- -==== - -The LDAP authentication is backed by the JNDI API in Java. -It has also the failover support. You can configure multiple space-separated -URLs in the `` option: - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-authentication-types.xml[tag=ldap-fallback] ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - - name: ldapFallbackRealm - authentication: - ldap: - url: ldap://ldap-master.example.com ldap://ldap-backup.example.com ----- -==== - -[[kerberos-authentication]] -=== Kerberos Authentication Type - -The Kerberos authentication protocol is one of the standard solutions -for single sign-on (SSO). It's well established in many companies. Hazelcast -supports Kerberos authentication as an {enterprise-product-name} feature and it also -provides Kerberos integration to LDAP-based authorization. - -The Kerberos support in Hazelcast has 2 configuration parts: identity -and authentication. The identity part is responsible for retrieving the service -ticket from Kerberos KDC (Key Distribution Center). -The authentication part verifies the service tickets. - -Default Service principal names for Hazelcast members are in the form -`hz/address@REALM`, for example `hz/192.168.1.1@ACME.COM`. - -Before a service ticket is issued, the client side of the connection has to be -authenticated, which means the TGT (Ticket Granting Ticket) is present in the Subject. 
- -Hazelcast delegates the TGT retrieval to vendor specific `Krb5LoginModule` -implementations (find the correct login module and its options in your Java -documentation). On the Hazelcast side, the `security-ream` property allows -referencing another realm with `Krb5LoginModule` configured. - -[tabs] -==== -Sample Kerberos Identity Configuration XML:: -+ --- - -[source,xml] ----- - - - - ACME.COM - krb5Initiator - - - - - - - - - true - true - - - - - ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - - name: kerberosRealm - identity: - kerberos: - realm: ACME.COM - security-realm: krb5Initiator - - name: krb5Initiator - authentication: - jaas: - class-name: com.sun.security.auth.module.Krb5LoginModule - properties: - useTicketCache: true - doNotPrompt: true ----- -==== - -The `` identity configuration has the following properties: - -[cols="1,1,3",options="header",] -.The Identity Configuration Options -|======================================================================= -| Property name -| Default value -| Description - -| `spn` -| -| Allows configuring static Service Principal Name (SPN). It's -meant for use cases where all the members share a single Kerberos identity. - -| `service-name-prefix` -| `"hz/"` -| Defines the prefix of SPN. By default the member's -principal name (for which this credentials -factory asks the service ticket) is in the form -`"[servicePrefix][memberIpAddress]@[REALM]"`, e.g., -`"hz/192.168.1.1@ACME.COM"`. - -| `realm` -| -| Kerberos realm name, e.g., `"ACME.COM"`. - -| `security-realm` -| -| Security realm name in the Hazelcast configuration used -for Kerberos authentication. The authentication configuration in the -referenced security realm will be used to fill the Subject with the Kerberos -credentials, e.g., TGT. - -| `use-canonical-hostname` -| `false` -| Flag which controls if canonical hostnames should be used instead of IP addresses -in generated Service Principal names. -This property is only used when Service Principal name is not static, i.e., when `spn` option -is not configured). - -| `principal` -| -| Kerberos principal name. This is a helper option which can be used together -with the `keytab-file` to replace the `security-realm` configuration. - -_We don't recommend using this property in production!_ - -| `keytab-file` -| -| Path to a keytab file with the current principal's secrets. -This is a helper option which can be used together -with the `principal` to replace the `security-realm` configuration. - -_We don't recommend using this property in production!_ -|======================================================================= - -The authenticating part on the server side is able to -accept the Kerberos tickets and verify them. Again the Kerberos -authentication is delegated to another realm with the Kerberos login module -configured. 
- -[tabs] -==== -Sample Kerberos Identity Configuration XML:: -+ --- - -[source,xml] ----- - - - - krb5Acceptor - - - - - - - - - false - false - true - true - true - hz/192.168.1.1@ACME.COM - /opt/member1.keytab - - - - - ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - name: kerberosRealm - authentication: - kerberos: - security-realm: krb5Acceptor - name: krb5Acceptor - authentication: - jaas: - - class-name: com.sun.security.auth.module.Krb5LoginModule - usage: REQUIRED - properties: - isInitiator: false - useTicketCache: false - doNotPrompt: true - useKeyTab: true - storeKey: true - principal: hz/192.168.1.1@ACME.COM - keyTab: /opt/member1.keytab ----- -==== - -The `krb5Acceptor` realm configuration in the snippet only loads the Kerberos secrets from -a keytab file and it doesn't authenticate against a KDC. - - -[cols="1,1,3",options="header",] -.The Authentication Configuration Options -|======================================================================= -| Property name -| Default value -| Description - -| `relax-flags-check` -| `false` -| Allows disabling some of the checks on the -incoming token, e.g., passes authentication even if the mutual -authentication is required by the token. - -| `use-name-without-realm` -| `false` -| When set to `true`, then the Kerberos realm part is removed from the -authenticated name, e.g., `"jduke@ACME.COM"` becomes just `"jduke"`. - -| `security-realm` -| -|Security realm name in the Hazelcast configuration used -for Kerberos authentication. The authentication configuration in the -referenced security realm will be used to fill the Subject with the Kerberos -credentials, e.g., Keytab. - -| `principal` -| -| Kerberos principal name. This is a helper option which can be used together -with the `keytab-file` to replace the `security-realm` configuration. - -_We don't recommend using this property in production!_ - -| `keytab-file` -| -| Path to a keytab file with the current principal's secrets. -This is a helper option which can be used together -with the `principal` to replace the `security-realm` configuration. - -_We don't recommend using this property in production!_ -|======================================================================= - -The `GssApiLoginModule` (implementing Kerberos authentication) -derives from the abstract `ClusterLoginModule`. As a result the `` -configuration supports the common options, too: `skip-identity`, `skip-endpoint` and -`skip-role`. - -[NOTE] -==== -* The Kerberos authentication in Hazelcast is only able to validate connections on -the server side. It doesn't support mutual authentication. -* The Generic Security Services API (GSS-API) is not used for protecting (wrapping) -the messages after the authentication, e.g., encryption, integrity checks. It's only used for -accepting tokens. -* The token itself is not protected against Man-in-the-Middle (MITM) attacks. -If an attacker is able to eavesdrop the token and use it before the -original sender, then the attacker succeeds with the authentication but -the original sender won't. -** There is a replay protection in Java which caches the already used tokens. -** Java Kerberos implementation accepts the token for 5 minutes (by default) -from its creation. -* Time has to be synchronized on the machines where the Kerberos is -used. - -If you are running Hazelcast in an untrusted network with a MITM attack -risk, then enable encryption on Hazelcast protocols to prevent stealing -the token. 
-==== - -=== Kerberos and LDAP integration - -The Kerberos authentication allows loading role mapping information from -an LDAP server (usually the one backing the Kerberos KDC server, too). -Therefore, the `` authentication configuration is also available as -sub-configuration of the `` authentication. - -[tabs] -==== -Sample Kerberos Identity Configuration XML:: -+ --- - -[source,xml] ----- - - - - true - krb5Acceptor - - ldap://ldap.hazelcast.com - GSSAPI - memberOf - krb5Initiator - (krb5PrincipalName=\{login}) - true - - - - ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - - name: kerberosRealm - authentication: - kerberos: - skip-role: true - security-realm: krb5Acceptor - ldap: - url: ldap://ldap.hazelcast.com - system-authentication: GSSAPI - security-realm: krb5Initiator - skip-authentication: true - user-filter: "(krb5PrincipalName=\{login})" - role-mapping-attribute: memberOf ----- -==== - -NOTE: The Kerberos-LDAP integration doesn't support credentials delegation, -i.e., reusing client's ticket for accessing the LDAP. It only allows using -the member's Kerberos credentials to authenticate into the LDAP. - -=== Simplified Kerberos Configuration - -To simplify the Kerberos configuration process for new users, Hazelcast allows -skipping `Krb5LoginModule` JAAS configuration within separate security realms. -Instead, it's possible to define the `principal` and `keytab-file` options in the -`kerberos` identity and authentication configurations. -If these options are used instead of the `security-realm`, then a new temporary -realm is generated on the fly during the authentication. - -[tabs] -==== -Sample Kerberos Identity Configuration XML:: -+ --- - -[source,xml] ----- - - - - hz/127.0.0.1@HAZELCAST.COM - /opt/localhost.keytab - - - - - HAZELCAST.COM - hz/127.0.0.1@HAZELCAST.COM - /opt/localhost.keytab - - - ----- --- - -YAML:: -+ -[source,yaml] ----- - realms: - - name: simpleKerberosRealm - authentication: - kerberos: - principal: hz/127.0.0.1@HAZELCAST.COM - keytab-file: /opt/localhost.keytab - identity: - kerberos: - realm: HAZELCAST.COM - principal: hz/127.0.0.1@HAZELCAST.COM - keytab-file: /opt/localhost.keytab ----- -==== - -A warning is logged during the first usage of the simplified configuration form. -It includes the generated configuration, so you can use it as a starting point -to define the full Kerberos configuration. An example warning log is shown below: - -``` -12:37:41,187 WARN [KerberosCredentialsFactory] Using generated Kerberos initiator -realm configuration is not intended for production use. It's recommended -to properly configure the Krb5LoginModule manually to fit your needs. -Following configuration was generated from provided keytab and principal properties: - - - - - - true - true - true - true - true - /opt/localhost.keytab - hz/127.0.0.1@HAZELCAST.COM - - - - - -``` - -=== TLS Authentication Type - -Hazelcast is able to protect network communication using TLS. -The TLS mutual authentication is also supported. It means not only the -server side identifies itself to a client side (member, client, REST client, etc.), -but also the client side needs to prove its identity by using a TLS (X.509) certificate. - -The `tls` authentication type verifies within the JAAS authentication -that the incoming connection already authenticated the client's TLS certificate. -A `ClusterIdentityPrincipal` uses the subject DN (distinguished name) -from the client's TLS certificate. 
- -This authentication type is able to parse a role name from the client's certificate -subject DN. The `` element has an attribute, `roleAttribute`, which specifies -a part of DN to be used as a role name. - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-authentication-types.xml[tag=tls] ----- --- - -YAML:: -+ -[source,yaml] ----- -realms: - name: tlsRealm - authentication: - tls: - roleAttribute: cn ----- -==== - -This `tls` authentication uses `cn` attribute from the subject DN as the role name. -If the subject DN in the certificate is `cn=admin,ou=Devs,o=Hazelcast` for instance, -then the following `Principals` are added: - -* `ClusterIdentityPrincipal: CN=admin,OU=Devs,O=Hazelcast` -* `ClusterRolePrincipal: admin` -* `ClusterEndpointPrincipal: [remote address of the connecting party]` - -== Identity Configuration - -The Identity configuration allows defining own <>. -These Credentials are used to authenticate to other systems. - -Available identity configuration types are as follows: - -* `username-password`: Defines a new `PasswordCredentials` object. -* `token`: Defines a new `TokenCredentials` object. -* `kerberos`: Defines the Kerberos identity which uses the -service tickets stored in the `TokenCredentials` object. -* `credentials-factory`: Configures the factory class which creates the `Credentials` objects. - -[[credentials]] -=== Credentials - -One of the key elements in Hazelcast security is the `Credentials` object, which -represents evidence of the identity (member or client). -The content of `Credentials` object is verified during the authentication. -Credentials is an interface which extends `Serializable`. - -[source,java] ----- -public interface Credentials extends Serializable { - String getName(); -} ----- - -There are two subtype interfaces which simplify the `Credentials` usage. -The subtypes reflect data provided in the client authentication messages: - -* Name and password (`com.hazelcast.security.PasswordCredentials`) -* Byte array token (`com.hazelcast.security.TokenCredentials`) - -The interfaces have the following forms: - -[source,java] ----- -public interface PasswordCredentials extends Credentials { - String getPassword(); -} ----- - -[source,java] ----- -public interface TokenCredentials extends Credentials { - byte[] getToken(); - - default Data asData() { - return new HeapData(getToken()); - } -} ----- - -The `Credentials` instance can be retrieved in the login modules -by handling a `CredentialsCallback`. - -Here is an example: - -[source,java] ----- -include::ROOT:example$/security/CustomLoginModuleTest.java[tag=credentials-callback] ----- - -=== Password Credentials - -A `PasswordCredentials` implementation can be configured as a -simple identity representation. It is configured by the `` -XML configuration element as shown below: - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-password-realm.xml[tag=password] ----- --- - -YAML:: -+ -[source,yaml] ----- -realms: - name: passwordRealm - identity: - username-password: - username: member1 - password: s3crEt -member-authentication: - realm: passwordRealm ----- -==== - -The equivalent programmatic configuration is shown below: - -[source,java] ----- -include::ROOT:example$/SecurityXmlTest.java[tag=password-realm] ----- - -=== Token Credentials - -`TokenCredentials` instances are also simply configurable for -identity representation. The `` XML configuration element -allows using either plain ASCII tokens or Base64 encoded values. 
-Its optional argument `encoding` can have either `base64` or `none` (default) -as its value. - -The following two realms define the same token value - bytes of the "Hazelcast" string: - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-authentication-types.xml[tag=token] ----- --- - -YAML:: -+ -[source,yaml] ----- -realms: - - name: tokenRealm1 - identity: - token: - value: Hazelcast - - name: tokenRealm2 - identity: - token: - encoding: base64 - value: SGF6ZWxjYXN0 ----- -==== - -The equivalent programmatic configuration is as follows: - -[source,java] ----- -include::ROOT:example$/SecurityXmlTest.java[tag=token-realm] ----- - -=== Kerberos Identity - -The `kerberos` identity type is used to retrieve Kerberos service tickets to access -a member with the `kerberos` authentication type configured. The resulting tickets -are `TokenCredentials` instances. Read more about `kerberos` identity in -the <>. - -=== Credentials Factory - -The most flexible way to define the `Credentials` objects -is using a custom credential factory. It is an implementation -of `com.hazelcast.security.ICredentialsFactory` -interface. Its `newCredentials()` method is the one which provides credentials. - -The XML configuration uses `` element to define the factory class. - -The behavior of credential factories can be controlled by specifying factory properties. -The properties are provided in the `init(Properties)` method. - -A sample configuration is shown below: - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- -include::ROOT:example$/hazelcast-authentication-types.xml[tag=credentialsFactoryRealm] ----- --- - -YAML:: -+ -[source,yaml] ----- -realms: - name: credentialsFactoryRealm - identity: - credentials-factory: - class-name: com.examples.TOTPCredentialsFactory - properties: - seed: 3132333435363738393031323334353637383930 ----- -==== - -== Security Realms on the Client Side - -Hazelcast offers limited support for security realms in native clients. -The configuration allows specifying JAAS login modules which can be referenced from -the Kerberos identity configuration. - -[tabs] -==== -XML:: -+ --- - -[source,xml] ----- - - - ACME.COM - krb5Initiator - - - - - - - - true - true - - - - - - - ----- --- - -YAML:: -+ -[source,yaml] ----- -security: - kerberos: - realm: ACME.COM - security-realm: krb5Initiator - realms: - name: krb5Initiator - authentication: - jaas: - class-name: com.sun.security.auth.module.Krb5LoginModule - usage: REQUIRED - properties: - useTicketCache: true - doNotPrompt: true ----- -==== diff --git a/docs/modules/security/pages/simple-authentication.adoc b/docs/modules/security/pages/simple-authentication.adoc index e490c0b8e..93aa08c88 100644 --- a/docs/modules/security/pages/simple-authentication.adoc +++ b/docs/modules/security/pages/simple-authentication.adoc @@ -2,20 +2,15 @@ [[simple-authentication]] :page-enterprise: true -The simple authentication enables you to define users and their roles **directly** in the +Simple authentication enables you to define users and their roles **directly** in the Hazelcast member configuration. -The xref:default-authentication.adoc[default authentication] is based on the member's -identity configuration (when defined) or cluster name (otherwise); it does not -allow defining users and assigning them roles. +Compared to advanced authentication methods, with simple authentication you don't need additional infrastructure for Hazelcast's enterprise-level authentication (LDAP server, Kerberos, etc.). 
You also don't need to provide custom login module implementations as described in xref:jaas-authentication.adoc[JAAS-based authentication]. -And when using the advanced authentication methods, you either need additional infrastructure for Hazelcast's enterprise-level authentication (LDAP server, Kerberos, etc.) or you need to provide your login module implementations in xref:jaas-authentication.adoc[JAAS-based authentication]. - -Simple authentication closes the gap between the default authentication and +Simple authentication closes the gap between default authentication and advanced authentication methods. -An example security configuration with the simple authentication used for client protocol is shown below. -The configuration should be done on the member side. +An example security configuration with simple authentication used for client protocol is shown below, with configuration done on the member side. [tabs] ==== @@ -81,7 +76,7 @@ hazelcast: ==== You can also provide multiple roles within a single role configuration element using comma -as the separator. See the below example: +separated values, as shown below: [tabs] ==== @@ -124,12 +119,10 @@ hazelcast: ---- ==== -You should not use the comma character in the role names since it is the -default role separator. However, in some cases (for example when using String based -login modules), you may want to use the comma character in a role name. For this, -you need to specify a different role separator character using the `role-separator` element -so that Hazelcast understands the default separator is changed. See the below example where -we set the separator character as `&`: +You should not use the comma character in role names because it's the +default role separator. However, if you need to use a comma character +in a role name, you can specify a different role separator character using +the `role-separator` element. The following example sets the separator character to `&`: [tabs] ==== diff --git a/docs/modules/security/pages/tls-authentication.adoc b/docs/modules/security/pages/tls-authentication.adoc new file mode 100644 index 000000000..3103a8c81 --- /dev/null +++ b/docs/modules/security/pages/tls-authentication.adoc @@ -0,0 +1,90 @@ += TLS Authentication +:page-enterprise: true + +Hazelcast is able to protect network communication using TLS. +TLS mutual authentication is also supported, which means that not only does the +server side have to identify itself to a client (member, client, REST client, etc.), +but the client side also needs to prove its identity by using a TLS (X.509) certificate. + +The `tls` authentication type verifies during Hazelcast authentication +that the incoming connection has already authenticated the client's TLS certificate. + +This authentication type is able to parse a role name (or names) from the client's certificate +subject DN. The `roleAttribute` property specifies the attribute name (a part of the Subject's DN) +to be used as a role name in Hazelcast. 
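The member example below enables mutual TLS (`mutualAuthentication: REQUIRED`), so the connecting side has to present its own certificate. As a minimal client-side sketch (paths and passwords are illustrative and mirror the client TLS examples elsewhere in this documentation), a Java client enables TLS with a keystore that holds its certificate:

[source,yaml]
----
hazelcast-client:
  network:
    ssl:
      enabled: true
      properties:
        protocol: TLSv1.2
        # Truststore containing the member certificate(s) the client should trust
        trustStore: /opt/client-truststore.p12
        trustStorePassword: changeit
        trustStoreType: PKCS12
        # Keystore with the client certificate; its subject DN provides the role attribute
        keyStore: /opt/client-keystore.p12
        keyStorePassword: secret.123
        keyStoreType: PKCS12
----

The member-side configuration looks like the following: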
+
+[tabs]
+====
+XML::
++
+--
+
+[source,xml]
+----
+<hazelcast>
+    <network>
+        <ssl enabled="true">
+            <properties>
+                <property name="mutualAuthentication">REQUIRED</property>
+                <property name="keyStore">/opt/hazelcast-keystore.p12</property>
+                <property name="keyStorePassword">secret.123</property>
+                <property name="trustStore">/opt/hazelcast-truststore.p12</property>
+                <property name="trustStorePassword">changeit</property>
+            </properties>
+        </ssl>
+    </network>
+    <security enabled="true">
+        <realms>
+            <realm name="tlsRealm">
+                <authentication>
+                    <tls roleAttribute="cn"/>
+                </authentication>
+            </realm>
+        </realms>
+        <client-authentication realm="tlsRealm"/>
+    </security>
+</hazelcast>
+----
+--
+
+YAML::
++
+[source,yaml]
+----
+hazelcast:
+  network:
+    ssl:
+      enabled: true
+      properties:
+        mutualAuthentication: REQUIRED
+        keyStore: /opt/hazelcast-keystore.p12
+        keyStorePassword: secret.123
+        trustStore: /opt/hazelcast-truststore.p12
+        trustStorePassword: changeit
+  security:
+    enabled: true
+    realms:
+      - name: tlsRealm
+        authentication:
+          tls:
+            roleAttribute: cn
+    client-authentication:
+      realm: tlsRealm
+----
+====
+
+This `tls` authentication uses the `cn` attribute from the subject DN as the role name.
+For example, if the subject DN in the certificate is `cn=admin,ou=Devs,o=Hazelcast`, then the `"admin"` role name is assigned to the client.
+
+[cols="1,1,3"]
+.TLS Configuration Options
+|===
+| Option Name
+| Default Value
+| Description
+
+| `roleAttribute`
+| `cn`
+| Name of an attribute in the client certificate's distinguished name (DN) whose value is used as a role name.
+
+|===
diff --git a/docs/modules/security/pages/tls-configuration.adoc b/docs/modules/security/pages/tls-configuration.adoc
index 8a92b9a40..e0e03f2ea 100644
--- a/docs/modules/security/pages/tls-configuration.adoc
+++ b/docs/modules/security/pages/tls-configuration.adoc
@@ -2,7 +2,7 @@
 [[other-tls-related-configuration]]
 :page-enterprise: true
 
-== TLS/SSL for Hazelcast Management Center
+== TLS for Hazelcast Management Center
 
 In order to use a secured communication between the Hazelcast cluster and
 Management Center, you have to configure Management Center as explained in the
@@ -161,20 +161,13 @@ of preference.
 
 You can configure a member and client with different cipher suites; but there should
 be at least one shared cipher suite.
 
-One of the cipher suites that gave very low overhead but still provides solid security
-is `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`.
-However, in our measurements this cipher suite only performs well using OpenSSL; using
-the regular Java TLS integration, it performs
-badly. So keep that in mind when configuring a client using regular SSL and a member
-using OpenSSL.
-
 Please check with your security expert to determine which cipher suites
 are appropriate and run performance tests to see which ones perform well in your environment.
 
 If you don't configure the cipher suites, then both client and/or member determine a cipher
-suite by themselves during the TLS/SSL
-handshake. This can lead to suboptimal performance and lower security than required.
+suite by themselves during the TLS handshake.
+This can lead to suboptimal performance and lower security than required.
 
 == Other Ways of Configuring Properties
diff --git a/docs/modules/security/pages/tls-ssl.adoc b/docs/modules/security/pages/tls-ssl.adoc
index ad8664741..b854b5d47 100644
--- a/docs/modules/security/pages/tls-ssl.adoc
+++ b/docs/modules/security/pages/tls-ssl.adoc
@@ -2,88 +2,20 @@
 [[tlsssl]]
 :page-enterprise: true
 
-NOTE: You cannot use TLS/SSL when xref:encryption.adoc[Symmetric Encryption]
-is enabled. Also note that the symmetric encryption feature has been deprecated.
-
-You can use the TLS (Transport Layer Security)
+You can use the Transport Layer Security (TLS)
 protocol to establish an encrypted communication
 across your Hazelcast cluster with key stores and trust stores.
 
-NOTE: It is NOT recommended to reuse the key stores and trust stores
-for external applications. 
-
-NOTE: Using TLS/SSL may have an impact on the cluster performance.
-See the xref:cluster-performance:performance-tuning.adoc#tls-ssl-perf[TLS/SSL Tuning section] for more information about the performance considerations.
-
-== TLS/SSL for Hazelcast Members
-
 Hazelcast allows you to encrypt socket level communication between
-Hazelcast members and between Hazelcast clients and members, for end-to-end encryption. To use it, you need to implement
-`com.hazelcast.nio.ssl.SSLContextFactory` and configure the SSL section
-in the network configuration.
-
-The following is the implementation code snippet:
-
-[source,java]
-----
-public class MySSLContextFactory implements SSLContextFactory {
-    public void init( Properties properties ) throws Exception {
-    }
-
-    public SSLContext getSSLContext() {
-        ...
-        SSLContext sslCtx = SSLContext.getInstance( "the protocol to be used" );
-        return sslCtx;
-    }
-}
-----
-
-The following is the base declarative configuration for the
-implemented `SSLContextFactory`:
-
-[tabs]
-====
-XML::
-+
---
-
-[source,xml]
-----
-<hazelcast>
-    ...
-    <network>
-        <ssl enabled="true">
-            <factory-class-name>
-                com.hazelcast.examples.MySSLContextFactory
-            </factory-class-name>
-            <properties>
-                <property name="foo">bar</property>
-            </properties>
-        </ssl>
-    </network>
-    ...
-</hazelcast>
-----
---
+Hazelcast members and between Hazelcast clients and members, for end-to-end encryption.
+To use it, you need to configure the `ssl` section in the network configuration.
 
-YAML::
-+
-[source,yaml]
-----
-hazelcast:
-  network:
-    ssl:
-      enabled: true
-      factory-class-name: com.hazelcast.examples.MySSLContextFactory
-      properties:
-        foo: bar
-----
-====
+NOTE: SSL (Secure Sockets Layer) is the predecessor protocol to TLS (Transport Layer Security).
+Both protocols are designed to encrypt and secure data transmitted over networks,
+but SSL is now considered outdated and has been replaced by TLS for improved security.
+Hazelcast keeps the `ssl` naming in its configuration for backward compatibility.
 
-Hazelcast provides a default SSLContextFactory,
-`com.hazelcast.nio.ssl.BasicSSLContextFactory`, which uses the configured
-keystore to initialize `SSLContext`; see the following example configuration
-for TLS/SSL.
+== TLS for Hazelcast Members
 
 [tabs]
 ====
@@ -94,12 +26,9 @@ XML::
 
 [source,xml]
 ----
 
-    ...
+    
     
-        
-            com.hazelcast.nio.ssl.BasicSSLContextFactory
-        
         
             TLSv1.2
             REQUIRED
@@ -113,7 +42,7 @@ XML::
             
         
     
-    ...
+    
 
 ----
 --
@@ -126,7 +55,6 @@ hazelcast:
   network:
     ssl:
       enabled: true
-      factory-class-name: com.hazelcast.nio.ssl.BasicSSLContextFactory
       properties:
         protocol: TLSv1.2
         mutualAuthentication: REQUIRED
@@ -169,7 +97,7 @@ require it
 See the <>.
 * `ciphersuites`: Comma-separated list of cipher suite names allowed
 to be used. Its default value is all supported suites in your Java runtime.
-* `protocol`: Name of the algorithm which is used in your TLS/SSL. Its
+* `protocol`: Name of the algorithm used by TLS. Its
 default value is `TLS`. Available values are:
 ** `TLS`
 ** `TLSv1` (deprecated)
@@ -189,7 +117,7 @@ A negative value such as `PT-1s` means the key material will be cached indefinit
 A zero-value duration expression such as `PT0s` means the key material will not be cached and will always be newly loaded for each TLS-protected connection.
 The key material is cached indefinitely if the new property is not specified (default value).
 
-== TLS/SSL for Hazelcast Clients
+== TLS for Hazelcast Clients
 
 The TLS configuration in Hazelcast clients is very
 similar to member configuration.
@@ -205,19 +133,16 @@ XML::
 
         
             ... 
- - com.hazelcast.nio.ssl.BasicSSLContextFactory - TLSv1.2 - /opt/hazelcast-client.truststore + /opt/client-truststore.p12 changeit - JKS + PKCS12 - /opt/hazelcast-client.keystore - clientsSecret - JKS + /opt/client-keystore.p12 + secret.123 + PKCS12 @@ -234,23 +159,20 @@ hazelcast-client: network: ssl: enabled: true - factory-class-name: com.hazelcast.nio.ssl.BasicSSLContextFactory properties: protocol: TLSv1.2 - trustStore: /opt/hazelcast-client.truststore + trustStore: /opt/client-truststore.p12 trustStorePassword: changeit - trustStoreType: JKS + trustStoreType: PKCS12 # Following properties are only needed when the mutual authentication is used. - keyStore: /opt/hazelcast-client.keystore + keyStore: /opt/client-keystore.p12 keyStorePassword: clientsSecret - keyStoreType: JKS + keyStoreType: PKCS12 ---- ==== -The same `BasicSSLContextFactory` properties used for members are available -on clients. Clients don't need to set `mutualAuthentication` property as it's used in configuring the server side of TLS connections. @@ -314,8 +236,8 @@ against member's truststore, the client is not authenticated. * `OPTIONAL`: Server asks for client certificate, but client is not required to provide any valid certificate. -NOTE: When a new client is introduced with a new keystore, the -truststore on the member side should be updated accordingly to +NOTE: When a new client is introduced with an untrusted certificate (e.g. a self-signed one), +the truststore on the member side should be updated accordingly to include new clients' information to be able to accept it. See the below example snippet to see the full configuration on the @@ -339,14 +261,84 @@ incoming TLS connections without verifying if the connecting side is trusted. Therefore, it's recommended to require the mutual authentication in Hazelcast members configuration. -== TLS/SSL for WAN Replication +== TLS for WAN Replication -Hazelcast allows you to secure the communications between the -WAN replicated clusters using TLS/SSL. WAN connections, cluster members -and clients can have their own unique TLS/SSL certificates. You can also -choose to have TLS/SSL configured on some of the members/clients and not on +Hazelcast allows you to secure the communications between WAN replicated clusters using TLS. WAN connections, cluster members +and clients can have their own unique TLS certificates. You can also +choose to have TLS configured on some of the members/clients and not on the others. -You can configure TLS/SSL for WAN replication using the advanced network configuration. +You can configure TLS for WAN replication using the advanced network configuration. See the xref:wan:advanced-features.adoc#securing-wan-connections.adoc[Securing the Connections for WAN Replication section] for the details. + +== Customize TLS + +You can customize TLS behavior by implementing your own `com.hazelcast.nio.ssl.SSLContextFactory` which allows building +a custom `javax.net.ssl.SSLContext` object. + +The following is an example code snippet for this: + +[source,java] +---- +public class MySSLContextFactory implements SSLContextFactory { + public void init( Properties properties ) throws Exception { + } + + public SSLContext getSSLContext() { + // ... + SSLContext sslCtx = SSLContext.getInstance( "the protocol to be used" ); + return sslCtx; + } +} +---- + +The following example shows the base declarative configuration for the +implemented `SSLContextFactory`: + +[tabs] +==== +XML:: ++ +-- + +[source,xml] +---- + + ... 
+    <network>
+        <ssl enabled="true">
+            <factory-class-name>
+                com.hazelcast.examples.MySSLContextFactory
+            </factory-class-name>
+            <properties>
+                <property name="foo">bar</property>
+            </properties>
+        </ssl>
+    </network>
+    ...
+</hazelcast>
+----
+--
+
+YAML::
++
+[source,yaml]
+----
+hazelcast:
+  network:
+    ssl:
+      enabled: true
+      factory-class-name: com.hazelcast.examples.MySSLContextFactory
+      properties:
+        foo: bar
+----
+====
+
+Hazelcast provides a default `SSLContextFactory`,
+`com.hazelcast.nio.ssl.BasicSSLContextFactory`, which uses the configured
+keystore to initialize `SSLContext`.
+
+NOTE: Using TLS may have an impact on the cluster performance. For more information, see xref:cluster-performance:performance-tips.adoc#tls-ssl-perf[TLS tuning].
+
+IMPORTANT: We do not recommend reusing key stores and trust stores for external applications. In addition, xref:encryption.adoc[Symmetric Encryption], which has been deprecated, can't be used with TLS.
diff --git a/docs/modules/security/partials/security-nav.adoc b/docs/modules/security/partials/security-nav.adoc
deleted file mode 100644
index 42982f7f3..000000000
--- a/docs/modules/security/partials/security-nav.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-* xref:security:overview.adoc[]
-** xref:security:management-center.adoc[Management Center]
-** xref:security:enabling-jaas.adoc[]
-** xref:security:socket-interceptor.adoc[]
-** xref:security:security-interceptor.adoc[]
-** xref:security:encryption.adoc[]
-** xref:security:tls-ssl.adoc[]
-** xref:security:integrating-openssl.adoc[]
-** xref:security:tls-configuration.adoc[]
-** xref:security:validating-secrets.adoc[]
-** xref:security:security-realms.adoc[]
-** xref:security:jaas-authentication.adoc[]
-** xref:security:cluster-member-security.adoc[]
-** xref:security:default-authentication.adoc[]
-** xref:security:native-client-security.adoc[]
-** xref:security:logging-auditable-events.adoc[]
-** xref:security:security-debugging.adoc[]
-** xref:security:fips-140-2.adoc[]
diff --git a/docs/modules/spring/pages/configuration.adoc b/docs/modules/spring/pages/configuration.adoc
index 3abae75dd..ed5c1cd39 100644
--- a/docs/modules/spring/pages/configuration.adoc
+++ b/docs/modules/spring/pages/configuration.adoc
@@ -196,12 +196,13 @@ While configuring Hazelcast within the Spring context, you can also pass values
 +
 [WARNING]
 ====
-If you use security to connect the Hazelcast client with Spring Boot to a cluster and want to see the health of that cluster,
+If you connect via the Hazelcast client with Spring Boot to a cluster with security enabled and want to see the health of that cluster,
 you must enable the permissions for transactions.
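A minimal member-side sketch of such a permission is shown below. It assumes that the `transaction` entry under `client-permissions` accepts a `principal` in the same way as the other permission types; check the client authorization reference for the exact schema and narrow the principal to your client's identity.

[source,yaml]
----
hazelcast:
  security:
    enabled: true
    client-permissions:
      # Assumed shape: grants the transaction permission to any authenticated client
      transaction:
        principal: "*"
----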
For further information, see the following topics: -* xref:security:native-client-security.adoc#authenticating-clients[Using client security] +* xref:security:authentication-overview.adoc[] +* xref:clients:java.adoc#client-security-configuration[Java Client Security] * xref:maintain-cluster:monitoring.adoc#health-check-and-monitoring[Enabling health check for a cluster] -* xref:security:native-client-security#transaction-permission[Configuring transaction permissions on the members] +* xref:security:client-authorization.adoc#transaction-permission[Configuring transaction permissions on the members] ==== + * **Hazelcast Supported Type Configurations and Examples** diff --git a/docs/modules/sql/pages/create-data-connection.adoc b/docs/modules/sql/pages/create-data-connection.adoc index 94685e07a..a2d931519 100644 --- a/docs/modules/sql/pages/create-data-connection.adoc +++ b/docs/modules/sql/pages/create-data-connection.adoc @@ -68,7 +68,7 @@ Replacing a data connection will not affect any queries that are already running == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:native-client-security.adoc#sql-permission[SQL Permissions]. +If xref:security:enable-security.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:client-authorization.adoc#sql-permission[SQL Permissions]. [[examples]] == Examples diff --git a/docs/modules/sql/pages/create-index.adoc b/docs/modules/sql/pages/create-index.adoc index 4e79c8f1b..425affc43 100644 --- a/docs/modules/sql/pages/create-index.adoc +++ b/docs/modules/sql/pages/create-index.adoc @@ -79,7 +79,7 @@ For details, see xref:query:indexing-maps.adoc#bitmap-indexes[Bitmap Indexes]. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:native-client-security.adoc#sql-permission[SQL Permissions]. +If xref:security:enable-security.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:client-authorization.adoc#sql-permission[SQL Permissions]. == Examples diff --git a/docs/modules/sql/pages/create-mapping.adoc b/docs/modules/sql/pages/create-mapping.adoc index 09912fab2..c188dc45b 100644 --- a/docs/modules/sql/pages/create-mapping.adoc +++ b/docs/modules/sql/pages/create-mapping.adoc @@ -150,7 +150,7 @@ OPTIONS ( == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:native-client-security.adoc#sql-permission[SQL Permissions]. +If xref:security:enable-security.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:client-authorization.adoc#sql-permission[SQL Permissions]. == Auto-resolving Columns and Options diff --git a/docs/modules/sql/pages/create-view.adoc b/docs/modules/sql/pages/create-view.adoc index f9fe45a0c..dd81da500 100644 --- a/docs/modules/sql/pages/create-view.adoc +++ b/docs/modules/sql/pages/create-view.adoc @@ -59,7 +59,7 @@ columns are in `information_schema.columns` table. 
== Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:native-client-security.adoc#sql-permission[SQL Permissions]. +If xref:security:enable-security.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:client-authorization.adoc#sql-permission[SQL Permissions]. == Examples @@ -71,4 +71,4 @@ AS SELECT FirstName, LastName FROM employees WHERE performance > 70 ; -``` \ No newline at end of file +``` diff --git a/docs/modules/sql/pages/drop-mapping.adoc b/docs/modules/sql/pages/drop-mapping.adoc index ec1e54e1a..17dbc3dd7 100644 --- a/docs/modules/sql/pages/drop-mapping.adoc +++ b/docs/modules/sql/pages/drop-mapping.adoc @@ -35,7 +35,7 @@ The `DROP MAPPING` statement accepts the following parameters. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:native-client-security.adoc#sql-permission[SQL Permissions]. +If xref:security:enable-security.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:client-authorization.adoc#sql-permission[SQL Permissions]. == Examples diff --git a/docs/modules/sql/pages/drop-view.adoc b/docs/modules/sql/pages/drop-view.adoc index 67d26e87b..bf5c73079 100644 --- a/docs/modules/sql/pages/drop-view.adoc +++ b/docs/modules/sql/pages/drop-view.adoc @@ -35,7 +35,7 @@ The `view_name` parameter is required. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:native-client-security.adoc#sql-permission[SQL Permissions]. +If xref:security:enable-security.adoc[security] is enabled, you can grant or deny permission for certain clients to use this statement. See xref:security:client-authorization.adoc#sql-permission[SQL Permissions]. == Examples diff --git a/docs/modules/sql/pages/mapping-to-a-file-system.adoc b/docs/modules/sql/pages/mapping-to-a-file-system.adoc index 890ba9270..5114211c9 100644 --- a/docs/modules/sql/pages/mapping-to-a-file-system.adoc +++ b/docs/modules/sql/pages/mapping-to-a-file-system.adoc @@ -22,7 +22,7 @@ Depending on the <>, you may also == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to your files. For details, see xref:pipelines:job-security.adoc[]. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to your files. For details, see xref:pipelines:job-security.adoc[]. == Configuration Options diff --git a/docs/modules/sql/pages/mapping-to-kafka.adoc b/docs/modules/sql/pages/mapping-to-kafka.adoc index c490b0383..7b7edf07a 100644 --- a/docs/modules/sql/pages/mapping-to-kafka.adoc +++ b/docs/modules/sql/pages/mapping-to-kafka.adoc @@ -20,7 +20,7 @@ or greater than 1.0.0. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. 
+If xref:security:enable-security.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. == Creating a Kafka Mapping diff --git a/docs/modules/sql/pages/mapping-to-maps.adoc b/docs/modules/sql/pages/mapping-to-maps.adoc index 2afd0766c..05fc1d40d 100644 --- a/docs/modules/sql/pages/mapping-to-maps.adoc +++ b/docs/modules/sql/pages/mapping-to-maps.adoc @@ -15,11 +15,11 @@ This connector is included in Hazelcast. == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set up permissions to restrict clients' access to maps. +If xref:security:enable-security.adoc[security] is enabled, you can set up permissions to restrict clients' access to maps. For example, to restrict reads on maps, you can use the `create` and `read` permissions. To restrict inserts, you can use the `put` permission. -For details, see xref:security:native-client-security.adoc[]. +For details, see xref:security:client-authorization.adoc[]. == Creating a Mapping to a Map @@ -179,4 +179,4 @@ members' classpaths by creating a JAR file and adding it to the `lib` directory, or you can use user code deployment. User code deployment must be enabled on the members, see xref:clusters:deploying-code-from-clients.adoc[] for details. -include::clusters:partial$ucn-migrate-tip.adoc[] \ No newline at end of file +include::clusters:partial$ucn-migrate-tip.adoc[] diff --git a/docs/modules/sql/pages/mapping-to-mongo.adoc b/docs/modules/sql/pages/mapping-to-mongo.adoc index e1e2645e3..7ed08d03f 100644 --- a/docs/modules/sql/pages/mapping-to-mongo.adoc +++ b/docs/modules/sql/pages/mapping-to-mongo.adoc @@ -46,7 +46,7 @@ NOTE: To be able to use SQL over MongoDB, you have to include `hazelcast-sql` as == Permissions [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, your clients may need permissions to use this connector. +If xref:security:enable-security.adoc[security] is enabled, your clients may need permissions to use this connector. For details, see xref:pipelines:job-security.adoc[]. == Before you Begin diff --git a/docs/modules/sql/pages/sql-overview.adoc b/docs/modules/sql/pages/sql-overview.adoc index b6550d939..7b6355953 100644 --- a/docs/modules/sql/pages/sql-overview.adoc +++ b/docs/modules/sql/pages/sql-overview.adoc @@ -89,12 +89,12 @@ Keywords or built-in function names are case-insensitive. == Permissions and Security [.enterprise]*{enterprise-product-name}* -If xref:security:enabling-jaas.adoc[security] is enabled, you can set xref:security:native-client-security.adoc[permissions] for the following: +If xref:security:enable-security.adoc[security] is enabled, you can set xref:security:client-authorization.adoc[permissions] for the following: -- xref:security:native-client-security.adoc#connector-permission[Connectors] (mappings) -- Some xref:security:native-client-security.adoc#sql-permission[SQL statements] +- xref:security:client-authorization.adoc#connector-permission[Connectors] (mappings) +- Some xref:security:client-authorization.adoc#sql-permission[SQL statements] -NOTE: When you run a query, Hazelcast runs it as a job. As a result, clients with the `read` permission for jobs can see the SQL query text and the arguments. See xref:security:native-client-security.adoc#job-permission[Job Permissions]. +NOTE: When you run a query, Hazelcast runs it as a job. 
As a result, clients with the `read` permission for jobs can see the SQL query text and the arguments. See xref:security:client-authorization.adoc#job-permission[Job Permissions].
 
 == Supported Statements
diff --git a/docs/modules/storage/pages/configuring-persistence.adoc b/docs/modules/storage/pages/configuring-persistence.adoc
index 1920e190f..6d4078b66 100644
--- a/docs/modules/storage/pages/configuring-persistence.adoc
+++ b/docs/modules/storage/pages/configuring-persistence.adoc
@@ -932,7 +932,7 @@ You can configure members to store your master encryption key in a Hashicorp Vau
 * `token`: The Vault authentication token.
 * `polling-interval`: The polling interval (in seconds) for checking for changes in Vault. Disabled by default.
-* `ssl`: The TLS/SSL configuration for HTTPS support. See the xref:security:tls-ssl.adoc[TLS/SSL section] for more
+* `ssl`: The TLS configuration for HTTPS support. See the xref:security:tls-ssl.adoc[TLS section] for more
 information about how to use the `ssl` element.
 
 NOTE: Sensitive configuration properties such as `token` should be protected using xref:configuration:variable-replacers.adoc[variable replacers].
diff --git a/docs/modules/wan/pages/configuring-for-map-and-cache.adoc b/docs/modules/wan/pages/configuring-for-map-and-cache.adoc
index 691e107b9..a96cae6b2 100644
--- a/docs/modules/wan/pages/configuring-for-map-and-cache.adoc
+++ b/docs/modules/wan/pages/configuring-for-map-and-cache.adoc
@@ -88,7 +88,7 @@ the target map if it does not exist in the target map.
 * `HigherHitsMergePolicy`: Incoming entry merges from the source map to
 the target map if the source entry has more hits than the target one.
 * `PassThroughMergePolicy`: Incoming entry merges from the source map to
-the target map unless the incoming entry is null.
+the target map unless the incoming entry is `null`.
 * `ExpirationTimeMergePolicy`: Incoming entry merges from the source map to
 the target map if the source entry will expire later than the destination entry.
 Please note that this merge policy can only be used when the clusters' clocks are in sync.
@@ -169,7 +169,7 @@ the target cache if it does not exist in the target cache.
 * `HigherHitsMergePolicy`: Incoming entry merges from the source cache to
 the target cache if the source entry has more hits than the target one.
 * `PassThroughMergePolicy`: Incoming entry merges from the source cache to
-the target cache unless the incoming entry is null.
+the target cache unless the incoming entry is `null`.
 * `ExpirationTimeMergePolicy`: Incoming entry merges from the source cache to
 the target cache if the source entry will expire later than the destination entry.
 Please note that this merge policy can only be used when the clusters' clocks are in sync.
diff --git a/docs/modules/wan/pages/rest-api.adoc b/docs/modules/wan/pages/rest-api.adoc
index 10e57fada..b445b86fa 100644
--- a/docs/modules/wan/pages/rest-api.adoc
+++ b/docs/modules/wan/pages/rest-api.adoc
@@ -17,7 +17,7 @@ are shown as placeholders in the REST calls:
 member on which you run the REST calls.
 * `clusterOnSource`: Name of your local (source) cluster.
 * `clusterPassword`: Password, if set, of your source cluster.
-Note that you need to enable the xref:security:enabling-jaas.adoc[security]
+Note that you need to enable the xref:security:enable-security.adoc[security]
 when you need a cluster password. If not set, the parameter is empty.
 * `wanRepName`: Name of the WAN Replication configuration.
 * `publisherId`: WAN replication publisher ID. If not set,