diff --git a/website/content/docs/commands/job/index.mdx b/website/content/docs/commands/job/index.mdx index 6a592688e53..fea1d50e296 100644 --- a/website/content/docs/commands/job/index.mdx +++ b/website/content/docs/commands/job/index.mdx @@ -16,20 +16,45 @@ Usage: `nomad job [options]` Run `nomad job -h` for help on that subcommand. The following subcommands are available: +- [`job action`][action] - Execute predefined actions +- [`job allocs`][allocs] - List allocations for a job - [`job deployments`][deployments] - List deployments for a job - [`job dispatch`][dispatch] - Dispatch an instance of a parameterized job - [`job eval`][eval] - Force an evaluation for a job - [`job history`][history] - Display all tracked versions of a job -- [`job inspect`][inspect] - Inspect the contents of a submitted job. +- [`job init`][init] - Create an example job specification +- [`job inspect`][inspect] - Inspect the contents of a submitted job +- [`job periodic force`][periodic force] - Force the evaluation of a periodic job +- [`job plan`][plan] - Schedule a dry run for a job - [`job promote`][promote] - Promote a job's canaries +- [`job restart`][restart] - Restart or reschedule allocations for a job - [`job revert`][revert] - Revert to a prior version of the job +- [`job run`][run] - Submit a new job +- [`job scale`][scale] - Update the number of allocations for a task group in a job +- [`job scaling-events`][scaling-events] - List the recent scaling events for a job - [`job status`][status] - Display status information about a job +- [`job stop`][stop] - Stop a running job and cancel its allocations +- [`job tag`][tag] - Tag a job with a version +- [`job validate`][validate] - Check a job specification for syntax errors + +[action]: /nomad/docs/commands/job/action 'Execute predefined actions' +[allocs]: /nomad/docs/commands/job/allocs 'List allocations for a job' [deployments]: /nomad/docs/commands/job/deployments 'List deployments for a job' [dispatch]: /nomad/docs/commands/job/dispatch 'Dispatch an instance of a parameterized job' [eval]: /nomad/docs/commands/job/eval 'Force an evaluation for a job' [history]: /nomad/docs/commands/job/history 'Display all tracked versions of a job' -[inspect]: /nomad/docs/commands/job/inspect -[promote]: /nomad/docs/commands/job/promote "Promote a job's canaries" +[init]: /nomad/docs/commands/job/init 'Create an example job specification' +[inspect]: /nomad/docs/commands/job/inspect 'Inspect the contents of a submitted job' +[periodic force]: /nomad/docs/commands/job/periodic-force 'Force the evaluation of a periodic job' +[plan]: /nomad/docs/commands/job/plan 'Schedule a dry run for a job' +[restart]: /nomad/docs/commands/job/restart 'Restart or reschedule allocations for a job' [revert]: /nomad/docs/commands/job/revert 'Revert to a prior version of the job' +[run]: /nomad/docs/commands/job/run 'Submit a new job' [status]: /nomad/docs/commands/job/status 'Display status information about a job' +[scale]: /nomad/docs/commands/job/scale 'Update the number of allocations for a task group in a job' +[scaling-events]: /nomad/docs/commands/job/scaling-events 'List the recent scaling events for a job' +[stop]: /nomad/docs/commands/job/stop 'Stop a running job and cancel its allocations' +[tag]: /nomad/docs/commands/job/tag 'Tag a job with a version' +[validate]: /nomad/docs/commands/job/validate 'Check a job specification for syntax errors' +[promote]: /nomad/docs/commands/job/promote diff --git a/website/content/docs/concepts/architecture/federation.mdx 
b/website/content/docs/concepts/architecture/federation.mdx new file mode 100644 index 00000000000..6d1b73ca218 --- /dev/null +++ b/website/content/docs/concepts/architecture/federation.mdx @@ -0,0 +1,64 @@ +--- +layout: docs +page_title: Federation +description: |- + Nomad federation is a multi-cluster orchestration and management feature that allows multiple + Nomad clusters, each defined as a region, to work together seamlessly. +--- + +# Federation + +Nomad federation is a multi-cluster orchestration and management feature that allows multiple Nomad +clusters, each defined as a region, to work together seamlessly. By federating clusters, you benefit +from improved scalability, fault tolerance, and centralized management of workloads across various +data centers or geographical locations. + +## Cross-Region request forwarding + +API calls can include a `region` query parameter that defines the Nomad region the query is +specified for. If this is not the local region, Nomad transparently forwards the request to a +server in the requested region. When you omit the query parameter, Nomad uses the region of the +server that is processing the request. + +## Replication + +Nomad writes the following objects in the authoritative region and replicates them to all federated +regions: + +- ACL [policies][acl_policy], [roles][acl_role], [auth methods][acl_auth_method], + [binding rules][acl_binding_rule], and [global tokens][acl_token] +- [Namespaces][namespace] +- [Node pools][node_pool] +- [Quota specifications][quota] +- [Sentinel policies][sentinel_policies] + +When creating, updating, or deleting these objects, Nomad always sends the request to the +authoritative region using RPC forwarding. + +Nomad starts replication routines on each federated cluster's leader server in a hub and spoke +design. The routines then use blocking queries to receive updates from the authoritative region to +mirror in their own state store. These routines also implement rate limiting, so that busy clusters +do not degrade due to overly aggressive replication processes. + + +Nomad writes local ACL tokens in the region where you make the request and does not replicate +those local tokens. + + +## Multi-Region job deployments + +Nomad job deployments can use the [`multiregion`][] block when running in federated mode. +Multiregion configuration instructs Nomad to register and run the job on all the specified regions, +removing the need for multiple job specification copies and registration in each region. +Multiregion jobs do not provide regional failover in the event of failure.
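As a brief sketch of the cross-region request forwarding described above, the same query can target a remote region from either the CLI or the HTTP API. The job name, agent address, and region names below are illustrative only:

```console
$ nomad job status -region=europe-west-1 example        # forwarded to a europe-west-1 server if that is not the local region
$ curl "http://127.0.0.1:4646/v1/job/example?region=asia-south-1"
```

If the server that receives the request is not in the requested region, it forwards the call to a server that is, so any federated region can be operated from a single endpoint.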
+ +[acl_policy]: /nomad/docs/concepts/acl#policy +[acl_role]: /nomad/docs/concepts/acl#role +[acl_auth_method]: /nomad/docs/concepts/acl#auth-method +[acl_binding_rule]: /nomad/docs/concepts/acl#binding-rule +[acl_token]: /nomad/docs/concepts/acl#token +[node_pool]: /nomad/docs/concepts/node-pools +[namespace]: /nomad/docs/other-specifications/namespace +[quota]: /nomad/docs/other-specifications/quota +[sentinel_policies]: /nomad/docs/enterprise/sentinel#sentinel-policies +[`multiregion`]: /nomad/docs/job-specification/multiregion diff --git a/website/content/docs/concepts/architecture.mdx b/website/content/docs/concepts/architecture/index.mdx similarity index 100% rename from website/content/docs/concepts/architecture.mdx rename to website/content/docs/concepts/architecture/index.mdx diff --git a/website/content/docs/concepts/workload-identity.mdx b/website/content/docs/concepts/workload-identity.mdx index 133d6121181..5effb4ae2d1 100644 --- a/website/content/docs/concepts/workload-identity.mdx +++ b/website/content/docs/concepts/workload-identity.mdx @@ -172,7 +172,7 @@ Consul and Vault can be configured to accept workload identities from Nomad for authentication. Refer to the [Consul][consul_int] and [Vault][vault_int] integration pages for more information. -[allocation]: /nomad/docs/concepts/architecture#allocation +[allocation]: /nomad/docs/glossary#allocation [identity-block]: /nomad/docs/job-specification/identity [jobspec_consul]: /nomad/docs/job-specification/consul [jobspec_consul_ns]: /nomad/docs/job-specification/consul#namespace diff --git a/website/content/docs/configuration/server.mdx b/website/content/docs/configuration/server.mdx index a1a4fa113d9..8ec3b1caae9 100644 --- a/website/content/docs/configuration/server.mdx +++ b/website/content/docs/configuration/server.mdx @@ -31,11 +31,12 @@ server { - `authoritative_region` `(string: "")` - Specifies the authoritative region, which provides a single source of truth for global configurations such as ACL - Policies and global ACL tokens. Non-authoritative regions will replicate from - the authoritative to act as a mirror. By default, the local region is assumed - to be authoritative. Setting `authoritative_region` assumes that ACLs have - been bootstrapped in the authoritative region. See [Configure for multiple - regions][] in the ACLs tutorial. + Policies and global ACL tokens in multi-region, federated deployments. + Non-authoritative regions will replicate from the authoritative to act as a + mirror. By default, the local region is assumed to be authoritative. Setting + `authoritative_region` assumes that ACLs have been bootstrapped in the + authoritative region. See [Configure for multiple regions][] in the ACLs + tutorial. - `bootstrap_expect` `(int: required)` - Specifies the number of server nodes to wait for before bootstrapping. It is most common to use the odd-numbered diff --git a/website/content/docs/enterprise/sentinel.mdx b/website/content/docs/enterprise/sentinel.mdx index 6ca332992a8..30fcf3c5e18 100644 --- a/website/content/docs/enterprise/sentinel.mdx +++ b/website/content/docs/enterprise/sentinel.mdx @@ -15,8 +15,8 @@ Docker images. Sentinel policies are defined as code, giving operators considerable flexibility to meet compliance requirements. See the [Nomad Sentinel Tutorial][] for more information about deploying -Sentinel policies, as well as the documentation for the [`nomad sentinel` -subcommands][] +Sentinel policies, as well as the documentation for the [`nomad sentinel`][] +subcommands. 
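For example, a policy written as code can be uploaded and listed with those subcommands. This is only a sketch; the policy name, enforcement level, and file name are illustrative:

```console
$ nomad sentinel apply -level=soft-mandatory docker-only docker-only.sentinel
$ nomad sentinel list
```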
### Sentinel Policies @@ -86,7 +86,7 @@ the Sentinel convention. Here are some examples: | `namespace.NodePoolConfiguration.Allowed[0]` | `namespace.node_pool_configuration.allowed[0]` | [Nomad Sentinel Tutorial]: /nomad/tutorials/governance-and-policy/sentinel -[`nomad sentinel` sub-commands]: /nomad/docs/commands/sentinel +[`nomad sentinel`]: /nomad/docs/commands/sentinel [sentinel]: https://docs.hashicorp.com/sentinel [JSON job specification]: /nomad/api-docs/json-jobs [ACL token]: https://github.com/hashicorp/nomad/blob/v1.7.0-rc.1/nomad/structs/structs.go#L12991-L13020 diff --git a/website/content/docs/job-specification/ui.mdx b/website/content/docs/job-specification/ui.mdx index c9792168b2d..b2fe276b662 100644 --- a/website/content/docs/job-specification/ui.mdx +++ b/website/content/docs/job-specification/ui.mdx @@ -46,3 +46,5 @@ job "docs" { # ... } ``` + +![Job UI links and description rendered in the Web UI](/img/nomad-ui-block.png) diff --git a/website/content/docs/networking/index.mdx b/website/content/docs/networking/index.mdx index 4a5aa1c5fdd..a613c613ee9 100644 --- a/website/content/docs/networking/index.mdx +++ b/website/content/docs/networking/index.mdx @@ -24,7 +24,7 @@ Nomad differs from other tools in this aspect. ## Allocation networking The base unit of scheduling in Nomad is an -[allocation](/nomad/docs/concepts/architecture#allocation), which means that all +[allocation](/nomad/docs/glossary#allocation), which means that all tasks in the same allocation run in the same client and share common resources, such as disk and networking. Allocations can request access to network resources, such as ports, using the diff --git a/website/content/docs/operations/federation/failure.mdx b/website/content/docs/operations/federation/failure.mdx new file mode 100644 index 00000000000..0a4ea5e62fe --- /dev/null +++ b/website/content/docs/operations/federation/failure.mdx @@ -0,0 +1,139 @@ +--- +layout: docs +page_title: Federated cluster failure scenarios +description: Failure scenarios in multi-region federated cluster deployments. +--- + +# Failure scenarios + +When running Nomad in federated mode, failure situations and impacts are different depending on +whether the authoritative region is the impacted region or not, and what the failure mode is. In +soft failures, the region's servers have lost quorum but the Nomad processes are still up, running, +and reachable. In hard failures, the regional servers are completely unreachable and are akin to +the underlying hardware having been terminated (cloud) or powered-off (on-prem). + +The scenarios are based on a Nomad deployment running three federated regions: + * `asia-south-1` + * `europe-west-1` - authoritative region + * `us-east-1` + +## Federated region failure: soft +In this situation the region `asia-south-1` has lost leadership but the servers are reachable and +up. + +All server logs in the impacted region have entries such as this example. +```console +[ERROR] nomad/worker.go:504: worker: failed to dequeue evaluation: worker_id=d19e6bb5-5ec9-8f75-9caf-47e2513fe28d error="No cluster leader" +``` + +βœ… Request forwarding continues to work between all federated regions that are running with + leadership. + +🟨 API requests, either directly or attempting to use request forwarding to the impacted region, + fail unless using the `stale=true` flag. + +βœ… Creation and deletion of replicated objects, such as namespaces, is written to the + authoritative region. 
+ +βœ… Any federated regions with leadership are able to continue to replicate all objects detailed + previously. + +βœ… Creation of local ACL tokens continues to work for all regions with leadership. + +βœ… Jobs **without** the [`multiregion`][] block deploy to all regions with leadership. + +❌ Jobs **with** the [`multiregion`][] block defined fail to deploy. + +## Federated region failure: hard +In this situation the region `asia-south-1` has gone down. When this happens, the Nomad server logs +for the other regions have log entries similar to this example. +```console +[DEBUG] go-hclog@v1.6.3/stdlog.go:58: nomad: memberlist: Failed UDP ping: asia-south-1-server-1.asia-south-1 (timeout reached) +[INFO] go-hclog@v1.6.3/stdlog.go:60: nomad: memberlist: Suspect asia-south-1-server-1.asia-south-1 has failed, no acks received +[DEBUG] go-hclog@v1.6.3/stdlog.go:58: nomad: memberlist: Initiating push/pull sync with: us-east-1-server-1.us-east-1 192.168.1.193:9002 +[DEBUG] go-hclog@v1.6.3/stdlog.go:58: nomad: memberlist: Failed UDP ping: asia-south-1-server-1.asia-south-1 (timeout reached) +[INFO] go-hclog@v1.6.3/stdlog.go:60: nomad: memberlist: Suspect asia-south-1-server-1.asia-south-1 has failed, no acks received +``` + +βœ… Request forwarding continues to work between all federated regions that are running with + leadership. + +❌ API requests, either directly or attempting to use request forwarding to the impacted region, + fail. + +βœ… Creation and deletion of replicated objects, such as namespaces, are written to the + authoritative region. + +βœ… Any federated regions with leadership continue to replicate all objects detailed + above. + +βœ… Creation of local ACL tokens continues to work for all regions that are running with + leadership. + +βœ… Jobs **without** the [`multiregion`][] block deploy to all regions with leadership. + +❌ Jobs **with** the [`multiregion`][] block defined fail to deploy. + +## Authoritative region failure: soft +In this situation the region `europe-west-1` has lost leadership but the servers are reachable and +up. + +The server logs in the authoritative region have entries such as this example. +```console +[ERROR] nomad/worker.go:504: worker: failed to dequeue evaluation: worker_id=68b3abe2-5e16-8f04-be5a-f76aebb0e59e error="No cluster leader" +``` + +βœ… Request forwarding continues to work between all federated regions that are running with + leadership. + +🟨 API requests, either directly or attempting to use request forwarding to the impacted region, + fail unless using the `stale=true` flag. + +❌ Creation and deletion of replicated objects, such as namespaces, fails. + +❌ Federated regions are able to read data to replicate, since they use the stale flag, but no + writes can occur to the authoritative region as described previously. + +βœ… Creation of local ACL tokens continues to work for all federated regions that are running + with leadership. + +βœ… Jobs **without** the [`multiregion`][] block deploy to all federated regions that + are running with leadership. + +❌ Jobs **with** the [`multiregion`][] block defined fail to deploy. + +## Authoritative region failure: hard +In this situation the region `europe-west-1` has gone down. When this happens, the Nomad server +leader logs for the other regions have log entries similar to this example.
+```console +[ERROR] nomad/leader.go:544: nomad: failed to fetch namespaces from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader.go:1767: nomad: failed to fetch policies from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader.go:2498: nomad: failed to fetch ACL binding rules from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader_ent.go:226: nomad: failed to fetch quota specifications from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader.go:703: nomad: failed to fetch node pools from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader.go:1909: nomad: failed to fetch tokens from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader.go:2083: nomad: failed to fetch ACL Roles from authoritative region: error="rpc error: EOF" +[DEBUG] nomad/leader_ent.go:84: nomad: failed to fetch policies from authoritative region: error="rpc error: EOF" +[ERROR] nomad/leader.go:2292: nomad: failed to fetch ACL auth-methods from authoritative region: error="rpc error: EOF" +[DEBUG] go-hclog@v1.6.3/stdlog.go:58: nomad: memberlist: Failed UDP ping: europe-west-1-server-1.europe-west-1 (timeout reached) +[INFO] go-hclog@v1.6.3/stdlog.go:60: nomad: memberlist: Suspect europe-west-1-server-1.europe-west-1 has failed, no acks received +[DEBUG] go-hclog@v1.6.3/stdlog.go:58: nomad: memberlist: Failed UDP ping: europe-west-1-server-1.europe-west-1 (timeout reached) +``` + +βœ… Request forwarding continues to work between all federated regions that are running with + leadership. + +❌ API requests, either directly or attempting to use request forwarding to the impacted region, + fail. + +❌ Creation and deletion of replicated objects, such as namespaces, fails. + +❌ Federated regions with leadership are not able to replicate the objects detailed in the logs. + +βœ… Creation of local ACL tokens continues to work for all regions with leadership. + +βœ… Jobs **without** the [`multiregion`][] block deploy to regions with leadership. + +❌ Jobs **with** the [`multiregion`][] block defined fail to deploy. + +[`multiregion`]: /nomad/docs/job-specification/multiregion diff --git a/website/content/docs/operations/federation/index.mdx b/website/content/docs/operations/federation/index.mdx new file mode 100644 index 00000000000..d0def5b9244 --- /dev/null +++ b/website/content/docs/operations/federation/index.mdx @@ -0,0 +1,64 @@ +--- +layout: docs +page_title: Federated cluster operations +description: |- + Operational considerations for running Nomad multi-region federated clusters as well as instructions for migrating the authoritative region to a federated region. +--- + +# Federated cluster operations + +This page lists operational considerations for running multi-region federated +clusters as well as instructions for migrating the authoritative region to a +federated region. + +## Operational considerations + +When operating multi-region federated Nomad clusters, consider the following: + +* **Regular snapshots**: You can back up Nomad server state using the + [`nomad operator snapshot save`][] and [`nomad operator snapshot agent`][] commands. Performing + regular backups expedites disaster recovery. The cadence depends on cluster rates of change + and your internal SLAs. You should regularly test snapshots using the + [`nomad operator snapshot restore`][] command to ensure they work.
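  A minimal sketch of that backup-and-verify loop, assuming an illustrative snapshot file name and a
  spare test cluster to restore into:

  ```console
  $ nomad operator snapshot save backup.snap      # capture the current server state
  $ nomad operator snapshot restore backup.snap   # run against a test cluster to confirm the snapshot is usable
  ```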
+ +* **Local ACL management tokens**: You need local management tokens to perform federated cluster + administration when the authoritative region is down. Make sure you have existing break-glass + tokens available for each region. + +* **Known paths to creating local ACL tokens**: If the authoritative region fails, creation of + global ACL tokens fails. If this happens, having the ability to create local ACL tokens allows + you to continue to interact with each available federated region. + +## Authoritative and federated regions + +* **Can non-authoritative regions continue to operate if the authoritative region is unreachable?** + Yes, running workloads are never interrupted due to federation failures. Scheduling of new + workloads and rescheduling of failed workloads are also never interrupted. + See [Failure Scenarios][failure_scenarios] for details. + +* **Can the authoritative region be deployed with servers only?** Yes, deploying the Nomad + authoritative region with servers only, without clients, works as expected. This servers-only + approach can expedite disaster recovery of the region. Restoration does not include objects such + as nodes, jobs, or allocations, which are large and require compute-intensive reconciliation + after restoration. + +* **Can I migrate the authoritative region to a currently federated region?** Yes, it is possible by + following these steps: + + 1. Update the [`authoritative_region`][] configuration parameter on the desired authoritative + region servers. + 1. Restart the server processes in the new authoritative region and ensure all data is present in + state as expected. If the network was partitioned as part of the failure of the original + authoritative region, writes of replicated objects may not have been successfully replicated to + federated regions. + 1. Update the [`authoritative_region`][] configuration parameter on the federated region servers + and restart their processes. + +* **Can federated regions be bootstrapped while the authoritative region is down?** No, they +cannot. + +[`nomad operator snapshot save`]: /nomad/docs/commands/operator/snapshot/save +[`nomad operator snapshot agent`]: /nomad/docs/commands/operator/snapshot/agent +[`nomad operator snapshot restore`]: /nomad/docs/commands/operator/snapshot/restore +[failure_scenarios]: /nomad/docs/operations/federation/failure +[`authoritative_region`]: /nomad/docs/configuration/server#authoritative_region diff --git a/website/content/docs/upgrade/upgrade-specific.mdx b/website/content/docs/upgrade/upgrade-specific.mdx index 8e008ee6687..296aefdc0f5 100644 --- a/website/content/docs/upgrade/upgrade-specific.mdx +++ b/website/content/docs/upgrade/upgrade-specific.mdx @@ -13,14 +13,17 @@ upgrade. However, specific versions of Nomad may have more details provided for their upgrades as a result of new features or changed behavior. This page is used to document those details separately from the standard upgrade flow. -## Nomad 1.9.2 +## Nomad 1.9.3 -In Nomad 1.9.2, the mechanism used for calculating when objects are eligible +In Nomad 1.9.3, the mechanism used for calculating when objects are eligible for garbage collection changes to a clock-based one. This has two consequences. First, it allows operators to set arbitrarily long GC intervals. Second, it requires that Nomad servers are kept roughly in sync time-wise, because GC can originate in a follower. +Nomad 1.9.2 contained a bug that could drop all cluster state on upgrade and +has been removed from downloads.
+ ## Nomad 1.9.0 #### Dropped support for older clients @@ -35,10 +38,10 @@ block. Nomad 1.9.0 stores keys used for signing Workload Identity and encrypting Variables in Raft, instead of storing key material in the external keystore. When using external KMS or Vault transit encryption for the -[`keyring`][] provider, the key encryption key (KEK) is stored outside of Nomad -and no cleartext key material exists on disk. When using the default AEAD -provider, the key encryption key (KEK) is stored in Raft alongside the encrypted -data encryption keys (DEK). +[`keyring`](/nomad/docs/configuration/keyring) provider, the key encryption key +(KEK) is stored outside of Nomad and no cleartext key material exists on disk. +When using the default AEAD provider, the key encryption key (KEK) is stored in +Raft alongside the encrypted data encryption keys (DEK). Nomad automatically migrates the key storage for all key material on the first [`root_key_gc_interval`][] after all servers are upgraded to 1.9.0. The diff --git a/website/content/tools/index.mdx b/website/content/tools/index.mdx index 603707a8111..f3de72863f9 100644 --- a/website/content/tools/index.mdx +++ b/website/content/tools/index.mdx @@ -45,6 +45,7 @@ The following external tools are currently available for Nomad and maintained by - [Nomad Pipeline](https://github.com/HyperBadger/nomad-pipeline) - A tool to make running pipeline-style workloads on Nomad - [Nomad Port Forward](https://github.com/Mongey/nomad-port-forward) - A tool for forwarding ports from a Nomad job to your local machine - [Nomad Toast](https://github.com/jrasell/nomad-toast) - A tool for receiving notifications based on HashiCorp Nomad events +- [Nomad Tools](https://github.com/Kamilcuk/nomad-tools) - Set of tools and utilities to ease interacting with HashiCorp Nomad scheduling solution. - [Nomad Vector Logger](https://github.com/mr-karan/nomad-vector-logger) - A daemon which continuously watches jobs running in a Nomad cluster and templates out a Vector configuration file which can be used to collect application logs enriched with Nomad metadata. - [Nomad Watcher](https://github.com/blalor/nomad-watcher) - A simple service that watches Nomad's nodes, jobs, deployments, evaluations, allocations, and task states, and writes the events to a file - [Nomadgen](https://github.com/smintz/nomadgen) - Craft your Hashicorp's Nomad job specs in python. diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index b8730da65a7..5692fbd7da0 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -131,7 +131,16 @@ }, { "title": "Architecture", - "path": "concepts/architecture" + "routes": [ + { + "title": "Overview", + "path": "concepts/architecture" + }, + { + "title": "Federation", + "path": "concepts/architecture/federation" + } + ] }, { "title": "CPU", @@ -2385,6 +2394,19 @@ "title": "Key Management", "path": "operations/key-management" }, + { + "title": "Federation", + "routes": [ + { + "title": "Overview", + "path": "operations/federation" + }, + { + "title": "Failure", + "path": "operations/federation/failure" + } + ] + }, { "title": "Considerations for Stateful Workloads", "path": "operations/stateful-workloads" diff --git a/website/public/img/nomad-ui-block.png b/website/public/img/nomad-ui-block.png new file mode 100644 index 00000000000..f69a2169399 Binary files /dev/null and b/website/public/img/nomad-ui-block.png differ