From 7cb5d7be2a5db2270bc3c9e898f32de801809829 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 18 Sep 2024 09:24:39 +0100 Subject: [PATCH 001/141] xe autocompletion: Fix prefix escaping bug Before, having an escape sequence as part of the parameter would break grep: ``` $ xe vdi-list name-label=CentOS\ 7\ \(1grep: Unmatched ( or \( ``` Move back to pure bash processing for the prefix, since it's the weirdness of variable escaping leaving the bash context causing this. Signed-off-by: Andrii Sultanov --- ocaml/xe-cli/bash-completion | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xe-cli/bash-completion b/ocaml/xe-cli/bash-completion index 0c29a5446b9..98df8be24fb 100644 --- a/ocaml/xe-cli/bash-completion +++ b/ocaml/xe-cli/bash-completion @@ -768,10 +768,10 @@ __add_completion() __preprocess_suggestions() { - echo "$1" | \ - sed -re 's/(^|[^\])((\\\\)*),,*/\1\2\n/g' -e 's/\\,/,/g' -e 's/\\\\/\\/g' | \ - sed -e 's/ *$//' | \ - grep "^${prefix}.*" + wordlist=$( echo "$1" | \ + sed -re 's/(^|[^\])((\\\\)*),,*/\1\2\n/g' -e 's/\\,/,/g' -e 's/\\\\/\\/g' | \ + sed -e 's/ *$//') + compgen -W "$wordlist" "$prefix" } # set_completions suggestions current_prefix description_cmd From d0215eda03d589b06d1b4903260e3c813e6c484b Mon Sep 17 00:00:00 2001 From: Guillaume Date: Wed, 18 Sep 2024 10:00:53 +0200 Subject: [PATCH 002/141] Use None4 if MODE is set to none Signed-off-by: Guillaume --- ocaml/networkd/lib/network_config.ml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ocaml/networkd/lib/network_config.ml b/ocaml/networkd/lib/network_config.ml index d9beb1b75c7..b306b580b32 100644 --- a/ocaml/networkd/lib/network_config.ml +++ b/ocaml/networkd/lib/network_config.ml @@ -135,9 +135,12 @@ let read_management_conf () = in let dns = (nameservers, domains) in (Static4 [(ip, prefixlen)], gateway, dns) - | "dhcp" | _ -> + | "dhcp" -> (DHCP4, None, ([], [])) + | _ -> + (None4, None, ([], [])) in + let phy_interface = {default_interface with persistent_i= true} in let bridge_interface = {default_interface with ipv4_conf; ipv4_gateway; persistent_i= true; dns} From 3d02e4556865d4b3cded328e7b8c4e21969d8985 Mon Sep 17 00:00:00 2001 From: Benjamin Reis Date: Wed, 18 Sep 2024 15:37:27 +0200 Subject: [PATCH 003/141] Fix network reset script in static IPv6 Only exits script when an error occurs when getting the static IPv6 config info Signed-off-by: Benjamin Reis --- python3/bin/xe-reset-networking | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python3/bin/xe-reset-networking b/python3/bin/xe-reset-networking index c1c3e38d283..58091d09120 100755 --- a/python3/bin/xe-reset-networking +++ b/python3/bin/xe-reset-networking @@ -154,9 +154,10 @@ if __name__ == "__main__": if options.mode_v6 == 'static': if options.ipv6 == '': parser.error("if static IPv6 mode is selected, an IPv6 address needs to be specified") - elif options.ipv6.find('/') == -1: + sys.exit(1) + if options.ipv6.find('/') == -1: parser.error("Invalid format: IPv6 must be specified with CIDR format: /") - sys.exit(1) + sys.exit(1) # Warn user if not os.access('/tmp/fist_network_reset_no_warning', os.F_OK): From 461f602b37037ec04a4b6f7a9662c1941ffd841d Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 18 Sep 2024 12:06:42 +0100 Subject: [PATCH 004/141] CA-398128: Be wary that dates in database lose precision Dates converted from unix timestamps have subsecond precision, but dates serialized in the database do not. 
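For illustration only (made-up values, plain floats rather than Date.t), the mismatch and the clamp look like this:

```ocaml
(* Illustration, not part of the change: the database keeps whole
   seconds, so a freshly converted timestamp never compares equal to
   the stored copy unless it is clamped first. *)
let () =
  let reported = 1726650279.25 in         (* timestamp from xenopsd       *)
  let stored   = Float.floor reported in  (* precision the database keeps *)
  assert (reported <> stored) ;           (* spurious difference          *)
  assert (Float.floor reported = stored)  (* clamping removes it          *)
```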
This is a change in behaviour introduced in 233b96ba44b002a27205b0681f35527457b78c2c This fix was supposed to be included in the change, but got lost. No other users were found that hit this change in behaviour. Signed-off-by: Pau Ruiz Safont --- ocaml/xapi/xapi_xenops.ml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index f50e692a555..0cfe9493d1a 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -2282,13 +2282,15 @@ let update_vm ~__context id = Option.iter (fun (_, state) -> let metrics = Db.VM.get_metrics ~__context ~self in + (* Clamp time to full seconds, stored timestamps do not + have decimals *) let start_time = - Date.of_unix_time state.Vm.last_start_time + Float.floor state.Vm.last_start_time |> Date.of_unix_time in - if - start_time - <> Db.VM_metrics.get_start_time ~__context ~self:metrics - then ( + let expected_time = + Db.VM_metrics.get_start_time ~__context ~self:metrics + in + if Date.is_later ~than:expected_time start_time then ( debug "xenopsd event: Updating VM %s last_start_time <- %s" id Date.(to_rfc3339 (of_unix_time state.Vm.last_start_time)) ; From 8cf4d11faf6b1167beabbb1ce28ee7c32b02979b Mon Sep 17 00:00:00 2001 From: xueqingz Date: Thu, 5 Sep 2024 07:32:49 +0000 Subject: [PATCH 005/141] CA-398138: Handle enum value unknown error for Go SDK Signed-off-by: xueqingz --- ocaml/sdk-gen/go/templates/ConvertEnum.mustache | 2 +- ocaml/sdk-gen/go/templates/Enum.mustache | 2 ++ ocaml/sdk-gen/go/test_data/enum.go | 2 ++ ocaml/sdk-gen/go/test_data/enum_convert.go | 2 +- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ocaml/sdk-gen/go/templates/ConvertEnum.mustache b/ocaml/sdk-gen/go/templates/ConvertEnum.mustache index 85bb1660c24..5663d04ab01 100644 --- a/ocaml/sdk-gen/go/templates/ConvertEnum.mustache +++ b/ocaml/sdk-gen/go/templates/ConvertEnum.mustache @@ -17,7 +17,7 @@ func deserialize{{func_name_suffix}}(context string, input interface{}) (value { value = {{name}} {{/items}} default: - err = fmt.Errorf("unable to parse XenAPI response: got value %q for enum %s at %s, but this is not any of the known values", strValue, "{{type}}", context) + value = {{type}}Unrecognized } return } diff --git a/ocaml/sdk-gen/go/templates/Enum.mustache b/ocaml/sdk-gen/go/templates/Enum.mustache index 1b668dd19bc..5288573e5da 100644 --- a/ocaml/sdk-gen/go/templates/Enum.mustache +++ b/ocaml/sdk-gen/go/templates/Enum.mustache @@ -6,6 +6,8 @@ const ( //{{#doc}} {{.}}{{/doc}} {{name}} {{type}} = "{{value}}" {{/values}} + // The value does not belong to this enumeration + {{name}}Unrecognized {{name}} = "unrecognized" ) {{/enums}} \ No newline at end of file diff --git a/ocaml/sdk-gen/go/test_data/enum.go b/ocaml/sdk-gen/go/test_data/enum.go index 0a0e17be7d3..db0e9d6994c 100644 --- a/ocaml/sdk-gen/go/test_data/enum.go +++ b/ocaml/sdk-gen/go/test_data/enum.go @@ -5,4 +5,6 @@ const ( VMTelemetryFrequencyDaily VMTelemetryFrequency = "daily" // Run telemetry task weekly VMTelemetryFrequencyWeekly VMTelemetryFrequency = "weekly" + // The value does not belong to this enumeration + VMTelemetryFrequencyUnrecognized VMTelemetryFrequency = "unrecognized" ) diff --git a/ocaml/sdk-gen/go/test_data/enum_convert.go b/ocaml/sdk-gen/go/test_data/enum_convert.go index 40129c0e5ca..737436cc192 100644 --- a/ocaml/sdk-gen/go/test_data/enum_convert.go +++ b/ocaml/sdk-gen/go/test_data/enum_convert.go @@ -14,7 +14,7 @@ func deserializeEnumTaskStatusType(context string, input interface{}) 
(value Tas case "success": value = TaskStatusTypeSuccess default: - err = fmt.Errorf("unable to parse XenAPI response: got value %q for enum %s at %s, but this is not any of the known values", strValue, "TaskStatusType", context) + value = TaskStatusTypeUnrecognized } return } \ No newline at end of file From 4d2134ef71f004bcd132cf489618564f71b8692b Mon Sep 17 00:00:00 2001 From: Gang Ji Date: Thu, 19 Sep 2024 15:06:22 +0800 Subject: [PATCH 006/141] CA-399229: Assert no host pending mandatory guidance for pool.join In pre-join check for pool.join, assert that there is no host pending mandatory guidance on the joining host or the pool coordinator. Signed-off-by: Gang Ji --- ocaml/xapi/xapi_pool.ml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 49ea7194dc9..1b2f4c08a75 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -807,6 +807,37 @@ let pre_join_checks ~__context ~rpc ~session_id ~force = (pool_joining_host_ca_certificates_conflict, !conflicting_names) ) in + let assert_no_host_pending_mandatory_guidance () = + (* Assert that there is no host pending mandatory guidance on the joiner or + the remote pool coordinator. + *) + Repository_helpers.assert_no_host_pending_mandatory_guidance ~__context + ~host:(Helpers.get_localhost ~__context) ; + let remote_coordinator = get_master ~rpc ~session_id in + let remote_coordinator_pending_mandatory_guidances = + Client.Host.get_pending_guidances ~rpc ~session_id + ~self:remote_coordinator + in + if remote_coordinator_pending_mandatory_guidances <> [] then ( + error + "%s: %d mandatory guidances are pending for remote coordinator %s: [%s]" + __FUNCTION__ + (List.length remote_coordinator_pending_mandatory_guidances) + (Ref.string_of remote_coordinator) + (remote_coordinator_pending_mandatory_guidances + |> List.map Updateinfo.Guidance.of_pending_guidance + |> List.map Updateinfo.Guidance.to_string + |> String.concat ";" + ) ; + raise + Api_errors.( + Server_error + ( host_pending_mandatory_guidances_not_empty + , [Ref.string_of remote_coordinator] + ) + ) + ) + in (* call pre-join asserts *) assert_pool_size_unrestricted () ; assert_management_interface_exists () ; @@ -817,6 +848,9 @@ let pre_join_checks ~__context ~rpc ~session_id ~force = assert_i_know_of_no_other_hosts () ; assert_no_running_vms_on_me () ; assert_no_vms_with_current_ops () ; + (* check first no host pending mandatory guidance then the hosts compatible, + api version and db schema *) + assert_no_host_pending_mandatory_guidance () ; assert_hosts_compatible () ; if not force then assert_hosts_homogeneous () ; assert_no_shared_srs_on_me () ; From 5fa15f0ddd187576ca542ef9e816d3718ddc1ef9 Mon Sep 17 00:00:00 2001 From: Mark Syms Date: Thu, 19 Sep 2024 14:00:57 +0100 Subject: [PATCH 007/141] Fix the definition of the Data.mirror operation Defining as Copy of uri * uri results in JSON of `["Copy", ["uri", "uri"]]` but the unparse tries to enforce the `["uri", "uri"]` as a tuple, a type which is not supported by JSON. 
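A rough sketch of the shape change, assuming ppx_deriving_rpc as used in data.ml (type names here are illustrative and uri is shortened to string):

```ocaml
(* Sketch only: names are illustrative, uri is shortened to string. *)

(* Old shape: a 2-tuple payload, which serialises to
   ["Copy", ["src", "dst"]] and cannot be unparsed as a tuple. *)
type old_operation = Copy of string * string [@@deriving rpcty]

(* New shape: a single opaque key, which serialises to
   ["CopyV1", "key"], a plain JSON string that round-trips. *)
type copy_operation_v1 = string [@@deriving rpcty]

type new_operation = CopyV1 of copy_operation_v1 [@@deriving rpcty]
```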
Signed-off-by: Mark Syms --- ocaml/xapi-storage/generator/lib/data.ml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/ocaml/xapi-storage/generator/lib/data.ml b/ocaml/xapi-storage/generator/lib/data.ml index e4571892f71..7aa66db8909 100644 --- a/ocaml/xapi-storage/generator/lib/data.ml +++ b/ocaml/xapi-storage/generator/lib/data.ml @@ -239,14 +239,17 @@ end module Data (R : RPC) = struct open R + type copy_operation_v1 = string [@@deriving rpcty] + type mirror_operation_v1 = string [@@deriving rpcty] + (** The primary key for referring to a long-running operation. *) type operation = - | Copy of uri * uri - (** Copy (src,dst) represents an on-going copy operation - from the [src] URI to the [dst] URI. *) - | Mirror of uri * uri - (** Mirror (src,dst) represents an on-going mirror - operation from the [src] URI to the [dst] URI. *) + | CopyV1 of copy_operation_v1 + (** CopyV1 (key) represents an on-going copy operation + with the unique [key]. *) + | MirrorV1 of mirror_operation_v1 + (** MirrorV1 (key) represents an on-going mirror + operation with the unique [key]. *) [@@deriving rpcty] (** A list of operations. *) From 338c94e557c0288955925a912c365a115219bf89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sat, 14 Sep 2024 18:47:34 +0100 Subject: [PATCH 008/141] CP-51479: [maintenance]: synchronize api_version.ml and api_version.ml.in MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ./configure generates api_version.ml from api_version.ml.in, but their comments didn't match. Synchronize them, such that running ./configure doesn't result in committable changes. Also ignore the api_version.ml.in2 file that gets created. Signed-off-by: Edwin Török --- .gitignore | 1 + ocaml/idl/api_version.ml | 3 +-- ocaml/idl/api_version.ml.in | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index b519eb9cb39..27ed892007d 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ ocaml/xenopsd/scripts/xen-backend.rules ocaml/xenopsd/xentoollog_flags ocaml/idl/gen_lifecycle.exe +ocaml/idl/api_version.ml.in2 # hugo .hugo_build.lock diff --git a/ocaml/idl/api_version.ml b/ocaml/idl/api_version.ml index 297be24bc25..23028c50796 100644 --- a/ocaml/idl/api_version.ml +++ b/ocaml/idl/api_version.ml @@ -12,8 +12,7 @@ * GNU Lesser General Public License for more details. *) -(* This file is only needed for building xapi with local make, now the - api_version_major and api_version_minor are defined in xapi.spec and this +(* Now the api_version_major and api_version_minor are defined in xapi.spec and this file will be regenerated from api_version.ml.in by configure.ml during koji build. *) diff --git a/ocaml/idl/api_version.ml.in b/ocaml/idl/api_version.ml.in index 984d207c7f6..07de45fbcaf 100644 --- a/ocaml/idl/api_version.ml.in +++ b/ocaml/idl/api_version.ml.in @@ -12,6 +12,10 @@ * GNU Lesser General Public License for more details. *) +(* Now the api_version_major and api_version_minor are defined in xapi.spec and this + file will be regenerated from api_version.ml.in by configure.ml during koji + build. 
*) + let api_version_major = @APIVERMAJ@L let api_version_minor = @APIVERMIN@L From e9427b4f9b4a467af13405d6cbf35b55be7df548 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sat, 14 Sep 2024 09:59:50 +0100 Subject: [PATCH 009/141] CP-51479: [maintenance]: do not install cmxs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We cannot prevent dune from building them at the moment (`nodynlink` is disabled, and doesn't work with PIE executables). However disable installing all these files to avoid the .spec file complaining about additional cmxs files appearing when opam packages are reorganized. Signed-off-by: Edwin Török --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 337e4dad88c..d4c2bd25d5f 100644 --- a/Makefile +++ b/Makefile @@ -274,6 +274,7 @@ install: build doc sdk doc-json mkdir -p $(DESTDIR)$(SDKDIR) cp -r $(XAPISDK)/* $(DESTDIR)$(SDKDIR) find $(DESTDIR)$(SDKDIR) -type f -exec chmod 644 {} \; + find $(DESTDIR) -name '*.cmxs' -delete uninstall: # only removes what was installed with `dune install` From 5184c6e02fd5f2fc361a73988770002e21d2298a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 16 Sep 2024 09:16:06 +0100 Subject: [PATCH 010/141] CP-51479: [maintenance]: reduce scope of dune aliases MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 'update-dm-lifecycle' and 'python' only exists in a single directory, tell 'dune' exactly where to look. This speeds up incremental builds: ``` dune build @ocaml/xapi-storage/python/xapi/storage/api/v5/python --profile=release ran 3.41 ± 0.18 times faster than dune build @python --profile=release dune build @ocaml/idl/update-dm-lifecycle -j 8 --profile=release ran 2.13 ± 0.19 times faster than dune build @update-dm-lifecycle -j 8 --profile=release --auto-promote ``` ``` Benchmark 2: dune build @ocaml/xapi-storage/python/xapi/storage/api/v5/python --profile=release Time (mean ± σ): 288.7 ms ± 3.7 ms [User: 229.5 ms, System: 58.9 ms] Range (min … max): 282.5 ms … 293.5 ms 10 runs Benchmark 2: dune build @ocaml/idl/update-dm-lifecycle -j 8 --profile=release Time (mean ± σ): 581.4 ms ± 15.8 ms [User: 442.5 ms, System: 175.8 ms] Range (min … max): 550.5 ms … 602.2 ms 10 runs ``` Signed-off-by: Edwin Török --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d4c2bd25d5f..7d56b541300 100644 --- a/Makefile +++ b/Makefile @@ -12,9 +12,9 @@ OPTMANDIR ?= $(OPTDIR)/man/man1/ # this is typically used when we're not building from a git repo build: [ -z "${XAPI_VERSION}" ] || (sed -i '/(version.*)/d' dune-project && echo "(version ${XAPI_VERSION})" >> dune-project) - dune build @update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote || dune build @update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote + dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote || dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote dune build @install -j $(JOBS) --profile=$(PROFILE) - dune build @python --profile=$(PROFILE) + dune build @ocaml/xapi-storage/python/xapi/storage/api/v5/python --profile=$(PROFILE) # Quickly verify that the code compiles, without actually building it check: From 53bcd6104643e89347771d008aefc4ef891822ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sun, 15 Sep 2024 22:40:28 +0100 Subject: [PATCH 011/141] 
CP-51479: [maintenance]: fix opam dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use `opam-dune-lint`. This will make it easier to later merge opam packages and keep the merged package updated correctly. Although `opam-dune-lint` doesn't find all problems yet, see: https://github.com/ocurrent/opam-dune-lint/issues/71 Signed-off-by: Edwin Török --- clock.opam | 1 + cohttp-posix.opam | 2 +- cohttp-posix.opam.template | 2 +- dune-project | 77 +++++++++++++++++++++++++++- ezxenstore.opam | 4 +- ezxenstore.opam.template | 4 +- forkexec.opam | 2 + gzip.opam | 2 +- gzip.opam.template | 2 +- http-lib.opam | 6 +++ message-switch-async.opam | 8 ++- message-switch-async.opam.template | 8 ++- message-switch-cli.opam | 2 +- message-switch-cli.opam.template | 2 +- message-switch-core.opam | 2 + message-switch-lwt.opam | 2 +- message-switch-lwt.opam.template | 2 +- message-switch.opam | 2 +- message-switch.opam.template | 2 +- pciutil.opam | 2 +- pciutil.opam.template | 2 +- rrd-transport.opam | 3 ++ rrd2csv.opam | 3 +- rrd2csv.opam.template | 3 +- rrdd-plugin.opam | 1 + rrdd-plugins.opam | 7 ++- rrdd-plugins.opam.template | 7 ++- rrddump.opam | 2 +- rrddump.opam.template | 2 +- safe-resources.opam | 2 +- safe-resources.opam.template | 2 +- sexpr.opam | 4 +- sexpr.opam.template | 4 +- stunnel.opam | 2 +- stunnel.opam.template | 2 +- uuid.opam | 3 +- uuid.opam.template | 3 +- varstored-guard.opam | 6 +++ varstored-guard.opam.template | 6 +++ vhd-format-lwt.opam | 4 ++ vhd-format.opam | 2 + vhd-format.opam.template | 2 + vhd-tool.opam | 9 ++++ xapi-cli-protocol.opam | 3 +- xapi-cli-protocol.opam.template | 3 +- xapi-client.opam | 3 +- xapi-client.opam.template | 3 +- xapi-compression.opam | 2 +- xapi-compression.opam.template | 2 +- xapi-consts.opam | 2 +- xapi-consts.opam.template | 2 +- xapi-datamodel.opam | 4 +- xapi-datamodel.opam.template | 4 +- xapi-expiry-alerts.opam | 2 +- xapi-expiry-alerts.opam.template | 2 +- xapi-idl.opam | 10 +++- xapi-idl.opam.template | 10 +++- xapi-inventory.opam | 2 +- xapi-inventory.opam.template | 2 +- xapi-log.opam | 7 ++- xapi-log.opam.template | 7 ++- xapi-nbd.opam | 4 +- xapi-nbd.opam.template | 4 +- xapi-networkd.opam | 5 ++ xapi-open-uri.opam | 3 +- xapi-open-uri.opam.template | 3 +- xapi-rrd.opam | 2 +- xapi-rrd.opam.template | 2 +- xapi-rrdd-plugin.opam | 2 +- xapi-rrdd-plugin.opam.template | 2 +- xapi-rrdd.opam | 10 ++++ xapi-schema.opam | 2 +- xapi-schema.opam.template | 2 +- xapi-sdk.opam | 2 + xapi-squeezed.opam | 3 +- xapi-squeezed.opam.template | 3 +- xapi-stdext-threads.opam | 2 + xapi-stdext-unix.opam | 2 + xapi-storage-cli.opam | 2 +- xapi-storage-cli.opam.template | 2 +- xapi-storage-script.opam | 2 +- xapi-storage-script.opam.template | 2 +- xapi-storage.opam | 4 +- xapi-storage.opam.template | 4 +- xapi-tracing-export.opam | 4 ++ xapi-tracing.opam | 1 + xapi-types.opam | 2 +- xapi-types.opam.template | 2 +- xapi-xenopsd-cli.opam | 7 ++- xapi-xenopsd-cli.opam.template | 7 ++- xapi-xenopsd-simulator.opam | 2 +- xapi-xenopsd-simulator.opam.template | 2 +- xapi-xenopsd-xc.opam | 10 +++- xapi-xenopsd-xc.opam.template | 10 +++- xapi-xenopsd.opam | 4 +- xapi-xenopsd.opam.template | 4 +- xapi.opam | 23 ++++++++- xe.opam | 8 ++- xe.opam.template | 8 ++- xen-api-client-async.opam | 6 ++- xen-api-client-async.opam.template | 6 ++- xen-api-client-lwt.opam | 5 +- xen-api-client-lwt.opam.template | 5 +- xml-light2.opam | 2 +- xml-light2.opam.template | 2 +- zstd.opam | 2 +- zstd.opam.template | 2 +- 107 files 
changed, 382 insertions(+), 96 deletions(-) diff --git a/clock.opam b/clock.opam index 73192316295..705f280d2b9 100644 --- a/clock.opam +++ b/clock.opam @@ -11,6 +11,7 @@ depends: [ "ocaml" {>= "4.12"} "alcotest" {with-test} "astring" + "fmt" "mtime" "ptime" "xapi-log" {= version} diff --git a/cohttp-posix.opam b/cohttp-posix.opam index 82bd187a844..e4aba962fa3 100644 --- a/cohttp-posix.opam +++ b/cohttp-posix.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "cohttp" ] synopsis: "Library required by xapi" diff --git a/cohttp-posix.opam.template b/cohttp-posix.opam.template index 62e5a3961d3..4660d0c1f58 100644 --- a/cohttp-posix.opam.template +++ b/cohttp-posix.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "cohttp" ] synopsis: "Library required by xapi" diff --git a/dune-project b/dune-project index 94a885046a7..fa6e37a749b 100644 --- a/dune-project +++ b/dune-project @@ -27,6 +27,7 @@ (ocaml (>= 4.12)) (alcotest :with-test) astring + fmt mtime ptime (xapi-log (= :version)) @@ -50,9 +51,11 @@ (depends (alcotest :with-test) astring + (fmt :with-test) mustache (xapi-datamodel (= :version)) (xapi-stdext-unix (and (= :version) :with-test)) + (xapi-test-utils :with-test) ) (allow_empty) ) @@ -115,6 +118,7 @@ ocaml dune (alcotest :with-test) + (fmt :with-test) re uri (uuid :with-test) @@ -132,8 +136,12 @@ cohttp-posix dune cohttp + ptime + result + rresult rpclib ppx_deriving_rpc + uri (xapi-log (= :version)) (xapi-open-uri (= :version)) (xapi-stdext-threads (= :version)) @@ -180,6 +188,7 @@ (xapi-stdext-threads (= :version)) (xapi-stdext-unix (= :version)) (xapi-idl (= :version)) + xenstore xenstore_transport ) ) @@ -192,13 +201,18 @@ (ocaml (>= "4.02.0")) (alcotest :with-test) astring + cmdliner + (fmt :with-test) (gzip (= :version)) (http-lib (= :version)) inotify io-page + ipaddr mtime + polly ppx_deriving_rpc rpclib + uri (ezxenstore (= :version)) (uuid (= :version)) xapi-backtrace @@ -207,6 +221,11 @@ (xapi-stdext-threads (= :version)) (xapi-stdext-unix (= :version)) xapi-tracing + xenctrl + xenstore + xenstore_transport + xmlm + yojson ) ) @@ -238,10 +257,14 @@ base-threads (forkexec (= :version)) (http-lib (= :version)) + integers mtime netlink re + result + rresult rpclib + uri (xapi-idl (= :version)) xapi-inventory (xapi-stdext-pervasives (= :version)) @@ -250,6 +273,7 @@ (xapi-stdext-unix (= :version)) xapi-test-utils (xen-api-client (= :version)) + yojson ) ) @@ -308,19 +332,27 @@ (depends alcotest ; needed for the quicktest binary angstrom + astring base-threads base64 + (bos :with-test) cdrom + cmdliner + cohttp conf-pam (crowbar :with-test) + cstruct ctypes ctypes-foreign domain-name (ezxenstore (= :version)) - (fmt :with-test) + fmt hex (http-lib (and :with-test (= :version))) ; the public library is only used for testing + integers ipaddr + logs + magic-mime mirage-crypto mirage-crypto-pk (mirage-crypto-rng (>= "0.11.0")) @@ -329,21 +361,31 @@ opentelemetry-client-ocurl pci (pciutil (= :version)) + polly ppx_deriving_rpc ppx_sexp_conv ppx_deriving psq + ptime qcheck-alcotest + qcheck-core + re + result rpclib (rrdd-plugin (= :version)) rresult sexpr + sexplib + sexplib0 sha (stunnel (= :version)) tar tar-unix + uri (uuid (= :version)) + uuidm x509 + xapi-backtrace (xapi-client (= :version)) (xapi-cli-protocol (= :version)) (xapi-consts (= :version)) @@ -362,6 +404,9 @@ 
(xapi-tracing (= :version)) (xapi-types (= :version)) (xapi-xenopsd (= :version)) + xenctrl ; for quicktest + xenstore_transport + xmlm (xml-light2 (= :version)) yojson (zstd (= :version)) @@ -391,6 +436,10 @@ (tags ("org.mirage" "org:xapi-project")) (depends (alcotest-lwt :with-test) + astring + bigarray-compat + cmdliner + cohttp cohttp-lwt conf-libssl (cstruct (>= "3.0.0")) @@ -398,13 +447,18 @@ (forkexec (= :version)) io-page lwt + lwt_ssl + nbd nbd-unix ppx_cstruct ppx_deriving_rpc re + result rpclib + ssl sha tar + uri (vhd-format (= :version)) (vhd-format-lwt (= :version)) (xapi-idl (= :version)) @@ -438,9 +492,13 @@ This package provides an Lwt compatible interface to the library.") (ocaml (and (>= "4.02.3") (< "5.0.0"))) (alcotest :with-test) (alcotest-lwt :with-test) + bigarray-compat (cstruct (< "6.1.0")) + cstruct-lwt + (fmt :with-test) (lwt (>= "3.2.0")) (mirage-block (>= "2.0.1")) + rresult (vhd-format (= :version)) (io-page (and :with-test (>= "2.4.0"))) ) @@ -486,8 +544,11 @@ This package provides an Lwt compatible interface to the library.") (depends (alcotest :with-test) astring + bigarray-compat cstruct crc + (fmt :with-test) + rpclib yojson (xapi-idl (= :version)) (xapi-rrd (= :version)) @@ -518,6 +579,8 @@ This package provides an Lwt compatible interface to the library.") ppx_sexp_conv rpclib sexplib + sexplib0 + uri (xapi-log (= :version)) (xapi-stdext-threads (= :version)) (odoc :with-doc) @@ -554,10 +617,16 @@ This package provides an Lwt compatible interface to the library.") (alcotest :with-test) astring (base64 (>= "3.1.0")) + fmt + ipaddr + mtime + ppx_deriving_rpc + (qcheck-core :with-test) rpclib (safe-resources(= :version)) sha (stunnel (= :version)) + uri (uuid (= :version)) xapi-backtrace (xapi-idl (= :version)) @@ -580,11 +649,13 @@ This package provides an Lwt compatible interface to the library.") (synopsis "Process-spawning library") (description "Client and server library to spawn processes.") (depends + astring base-threads (fd-send-recv (>= "2.0.0")) ppx_deriving_rpc rpclib (uuid (= :version)) + xapi-backtrace (xapi-log (= :version)) (xapi-stdext-pervasives (= :version)) (xapi-stdext-unix (= :version)) @@ -661,6 +732,8 @@ This package provides an Lwt compatible interface to the library.") (depends base-threads base-unix + (alcotest :with-test) + (fmt :with-test) (odoc :with-doc) (xapi-stdext-pervasives (= :version)) (mtime :with-test) @@ -675,10 +748,12 @@ This package provides an Lwt compatible interface to the library.") (depends (ocaml (>= 4.12.0)) (alcotest :with-test) + astring base-unix (bisect_ppx :with-test) (fd-send-recv (>= 2.0.0)) fmt + integers (mtime (and (>= 2.0.0) :with-test)) (logs :with-test) (qcheck-core (and (>= 0.21.2) :with-test)) diff --git a/ezxenstore.opam b/ezxenstore.opam index 5d88113b816..d5a1ff58de2 100644 --- a/ezxenstore.opam +++ b/ezxenstore.opam @@ -10,8 +10,8 @@ dev-repo: "git+https://github.com/xapi-project/xen-api.git" build: [[ "dune" "build" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" {>= "1.4"} - "cmdliner" {with-test & >= "1.1.0"} + "dune" {>= "3.15"} + "cmdliner" "logs" "uuidm" "xapi-stdext-unix" diff --git a/ezxenstore.opam.template b/ezxenstore.opam.template index 1a3283178aa..4f7eb3447fa 100644 --- a/ezxenstore.opam.template +++ b/ezxenstore.opam.template @@ -8,8 +8,8 @@ dev-repo: "git+https://github.com/xapi-project/xen-api.git" build: [[ "dune" "build" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" {>= "1.4"} - "cmdliner" {with-test & >= "1.1.0"} + "dune" {>= "3.15"} + "cmdliner" "logs" 
"uuidm" "xapi-stdext-unix" diff --git a/forkexec.opam b/forkexec.opam index 6d6d2504488..68ca75e06df 100644 --- a/forkexec.opam +++ b/forkexec.opam @@ -9,11 +9,13 @@ homepage: "https://xapi-project.github.io/" bug-reports: "https://github.com/xapi-project/xen-api/issues" depends: [ "dune" {>= "3.15"} + "astring" "base-threads" "fd-send-recv" {>= "2.0.0"} "ppx_deriving_rpc" "rpclib" "uuid" {= version} + "xapi-backtrace" "xapi-log" {= version} "xapi-stdext-pervasives" {= version} "xapi-stdext-unix" {= version} diff --git a/gzip.opam b/gzip.opam index 59901c80ee6..7a04554f2a9 100644 --- a/gzip.opam +++ b/gzip.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xapi-compression" ] synopsis: "Library required by xapi" diff --git a/gzip.opam.template b/gzip.opam.template index 8e7be0f3783..7c960776d88 100644 --- a/gzip.opam.template +++ b/gzip.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xapi-compression" ] synopsis: "Library required by xapi" diff --git a/http-lib.opam b/http-lib.opam index e8a5de4ddc9..df1b7735eb7 100644 --- a/http-lib.opam +++ b/http-lib.opam @@ -13,10 +13,16 @@ depends: [ "alcotest" {with-test} "astring" "base64" {>= "3.1.0"} + "fmt" + "ipaddr" + "mtime" + "ppx_deriving_rpc" + "qcheck-core" {with-test} "rpclib" "safe-resources" {= version} "sha" "stunnel" {= version} + "uri" "uuid" {= version} "xapi-backtrace" "xapi-idl" {= version} diff --git a/message-switch-async.opam b/message-switch-async.opam index 1192cb6cb9e..ac53e522c21 100644 --- a/message-switch-async.opam +++ b/message-switch-async.opam @@ -14,9 +14,15 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "async" {>= "v0.9.0"} + "async_kernel" + "async_unix" + "base" + "core" + "core_kernel" + "core_unix" "cohttp-async" {>= "1.0.2"} "message-switch-core" ] diff --git a/message-switch-async.opam.template b/message-switch-async.opam.template index a6828673032..aaa69dc257e 100644 --- a/message-switch-async.opam.template +++ b/message-switch-async.opam.template @@ -12,9 +12,15 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "async" {>= "v0.9.0"} + "async_kernel" + "async_unix" + "base" + "core" + "core_kernel" + "core_unix" "cohttp-async" {>= "1.0.2"} "message-switch-core" ] diff --git a/message-switch-cli.opam b/message-switch-cli.opam index d576f9f3a42..ccbea62e0b2 100644 --- a/message-switch-cli.opam +++ b/message-switch-cli.opam @@ -14,7 +14,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "cmdliner" "message-switch-unix" diff --git a/message-switch-cli.opam.template b/message-switch-cli.opam.template index dbf5de7d80c..0d9d0a1ec6d 100644 --- a/message-switch-cli.opam.template +++ b/message-switch-cli.opam.template @@ -12,7 +12,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "cmdliner" "message-switch-unix" diff --git a/message-switch-core.opam b/message-switch-core.opam index 2fd00d31457..a6b183bdd7f 100644 --- a/message-switch-core.opam +++ b/message-switch-core.opam @@ -16,6 +16,8 @@ depends: [ "ppx_sexp_conv" "rpclib" "sexplib" + "sexplib0" + "uri" "xapi-log" {= version} "xapi-stdext-threads" {= version} "odoc" {with-doc} diff --git a/message-switch-lwt.opam b/message-switch-lwt.opam index 
a52b3eca124..3688d40a188 100644 --- a/message-switch-lwt.opam +++ b/message-switch-lwt.opam @@ -14,7 +14,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "cohttp-lwt-unix" "lwt" {>= "3.0.0"} diff --git a/message-switch-lwt.opam.template b/message-switch-lwt.opam.template index 766fbbceaa2..b038e76b867 100644 --- a/message-switch-lwt.opam.template +++ b/message-switch-lwt.opam.template @@ -12,7 +12,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "cohttp-lwt-unix" "lwt" {>= "3.0.0"} diff --git a/message-switch.opam b/message-switch.opam index b09cec4ca7c..4ee77fdca5d 100644 --- a/message-switch.opam +++ b/message-switch.opam @@ -15,7 +15,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "cmdliner" "cohttp-async" {with-test} diff --git a/message-switch.opam.template b/message-switch.opam.template index 793c8aceaa5..8a898c41747 100644 --- a/message-switch.opam.template +++ b/message-switch.opam.template @@ -13,7 +13,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "odoc" {with-doc} "cmdliner" "cohttp-async" {with-test} diff --git a/pciutil.opam b/pciutil.opam index e4c52c1629a..4e93f06fccf 100644 --- a/pciutil.opam +++ b/pciutil.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xapi-stdext-unix" ] synopsis: "Library required by xapi" diff --git a/pciutil.opam.template b/pciutil.opam.template index fb0823e55c7..48f9d097162 100644 --- a/pciutil.opam.template +++ b/pciutil.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xapi-stdext-unix" ] synopsis: "Library required by xapi" diff --git a/rrd-transport.opam b/rrd-transport.opam index 07fe41dd8cc..441dbeebbd9 100644 --- a/rrd-transport.opam +++ b/rrd-transport.opam @@ -12,8 +12,11 @@ depends: [ "dune" {>= "3.15"} "alcotest" {with-test} "astring" + "bigarray-compat" "cstruct" "crc" + "fmt" {with-test} + "rpclib" "yojson" "xapi-idl" {= version} "xapi-rrd" {= version} diff --git a/rrd2csv.opam b/rrd2csv.opam index cb36ed57a70..4d71ee4468b 100644 --- a/rrd2csv.opam +++ b/rrd2csv.opam @@ -14,13 +14,14 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "http-lib" "xapi-client" "xapi-idl" "xapi-rrd" "xapi-stdext-std" "xapi-stdext-threads" + "xmlm" ] synopsis: "Convert XenServer RRD data into CSV format" url { diff --git a/rrd2csv.opam.template b/rrd2csv.opam.template index 0f598244249..e86d004589e 100644 --- a/rrd2csv.opam.template +++ b/rrd2csv.opam.template @@ -12,13 +12,14 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "http-lib" "xapi-client" "xapi-idl" "xapi-rrd" "xapi-stdext-std" "xapi-stdext-threads" + "xmlm" ] synopsis: "Convert XenServer RRD data into CSV format" url { diff --git a/rrdd-plugin.opam b/rrdd-plugin.opam index 5b113952b04..f59d26a365e 100644 --- a/rrdd-plugin.opam +++ b/rrdd-plugin.opam @@ -20,6 +20,7 @@ depends: [ "xapi-stdext-threads" {= version} "xapi-stdext-unix" {= version} "xapi-idl" {= version} + "xenstore" "xenstore_transport" "odoc" {with-doc} ] diff --git a/rrdd-plugins.opam b/rrdd-plugins.opam index e0a4ac91af9..4ca427e4561 100644 --- a/rrdd-plugins.opam +++ b/rrdd-plugins.opam @@ -12,18 +12,23 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] synopsis: "Plugins registering to the RRD daemon and 
exposing various metrics" depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "astring" "base-threads" "base-unix" + "cstruct" "cstruct-unix" "ezxenstore" "inotify" + "ppx_cstruct" "rrdd-plugin" + "stringext" "uuid" "xapi-stdext-std" "xapi-stdext-unix" "xenctrl" "xenstore" + "xenstore_transport" "mtime" ] url { diff --git a/rrdd-plugins.opam.template b/rrdd-plugins.opam.template index 9db3f7e4a75..218b5f3c7bc 100644 --- a/rrdd-plugins.opam.template +++ b/rrdd-plugins.opam.template @@ -10,18 +10,23 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] synopsis: "Plugins registering to the RRD daemon and exposing various metrics" depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "astring" "base-threads" "base-unix" + "cstruct" "cstruct-unix" "ezxenstore" "inotify" + "ppx_cstruct" "rrdd-plugin" + "stringext" "uuid" "xapi-stdext-std" "xapi-stdext-unix" "xenctrl" "xenstore" + "xenstore_transport" "mtime" ] url { diff --git a/rrddump.opam b/rrddump.opam index b52fb1cb46b..84464b418d3 100644 --- a/rrddump.opam +++ b/rrddump.opam @@ -8,7 +8,7 @@ authors: "John Else" tags: "org:xapi-project" homepage: "https://github.com/xapi-project/xen-api" bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: ["rrd-transport" "xapi-rrd" "xmlm"] +depends: ["rrd-transport" "xapi-rrd" "xmlm" "dune" {>= "3.15"}] build: ["dune" "build" "-p" name "-j" jobs] dev-repo: "git+https://github.com/xapi-project/xen-api.git" url { diff --git a/rrddump.opam.template b/rrddump.opam.template index c97c7947e05..1237bb624d5 100644 --- a/rrddump.opam.template +++ b/rrddump.opam.template @@ -6,7 +6,7 @@ authors: "John Else" tags: "org:xapi-project" homepage: "https://github.com/xapi-project/xen-api" bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: ["rrd-transport" "xapi-rrd" "xmlm"] +depends: ["rrd-transport" "xapi-rrd" "xmlm" "dune" {>= "3.15"}] build: ["dune" "build" "-p" name "-j" jobs] dev-repo: "git+https://github.com/xapi-project/xen-api.git" url { diff --git a/safe-resources.opam b/safe-resources.opam index 18c9270b966..b8f0e5b615b 100644 --- a/safe-resources.opam +++ b/safe-resources.opam @@ -13,7 +13,7 @@ build: [ available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "fmt" "logs" "xapi-backtrace" diff --git a/safe-resources.opam.template b/safe-resources.opam.template index b02f53a13fb..ae64f0c2d53 100644 --- a/safe-resources.opam.template +++ b/safe-resources.opam.template @@ -11,7 +11,7 @@ build: [ available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "fmt" "logs" "xapi-backtrace" diff --git a/sexpr.opam b/sexpr.opam index aded988a188..daa33dc6619 100644 --- a/sexpr.opam +++ b/sexpr.opam @@ -11,8 +11,10 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "alcotest" {with-test} "astring" + "rresult" {with-test} "qcheck-core" {with-test} "xapi-stdext-threads" ] diff --git a/sexpr.opam.template b/sexpr.opam.template index d83e0f2a493..392b2e77c07 100644 --- a/sexpr.opam.template +++ b/sexpr.opam.template @@ -9,8 +9,10 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "alcotest" {with-test} "astring" + "rresult" {with-test} "qcheck-core" {with-test} "xapi-stdext-threads" ] diff --git a/stunnel.opam b/stunnel.opam index 3831cdec076..d28894c4d8c 100644 --- a/stunnel.opam +++ b/stunnel.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" 
"-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "astring" "forkexec" "safe-resources" diff --git a/stunnel.opam.template b/stunnel.opam.template index 1e96c54c8d8..be9d1ca0764 100644 --- a/stunnel.opam.template +++ b/stunnel.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "astring" "forkexec" "safe-resources" diff --git a/uuid.opam b/uuid.opam index fa7da3a7317..c13b0c5ecfc 100644 --- a/uuid.opam +++ b/uuid.opam @@ -14,8 +14,9 @@ build: [ available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "alcotest" {with-test} + "fmt" {with-test} "uuidm" ] synopsis: "Library required by xapi" diff --git a/uuid.opam.template b/uuid.opam.template index daa9cee8dfe..aacc8f63c2b 100644 --- a/uuid.opam.template +++ b/uuid.opam.template @@ -12,8 +12,9 @@ build: [ available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "alcotest" {with-test} + "fmt" {with-test} "uuidm" ] synopsis: "Library required by xapi" diff --git a/varstored-guard.opam b/varstored-guard.opam index 2e794c9309b..d98b387a69b 100644 --- a/varstored-guard.opam +++ b/varstored-guard.opam @@ -9,10 +9,16 @@ bug-reports: "https://github.com/xapi-project/xen-api" build: [["dune" "build" "-p" name "-j" jobs]] run-test: [[ "dune" "runtest" "-p" name "-j" jobs ]] depends: [ + "dune" {>= "3.15"} "cmdliner" + "alcotest" {with-test} "cohttp-lwt" + "fmt" {with-test} + "lwt" {with-test} "message-switch-lwt" + "rpclib" "rpclib-lwt" + "uri" {with-test} "xapi-idl" "xen-api-client-lwt" "alcotest-lwt" {with-test} diff --git a/varstored-guard.opam.template b/varstored-guard.opam.template index d58715bcc50..ea9ccf6aa68 100644 --- a/varstored-guard.opam.template +++ b/varstored-guard.opam.template @@ -7,10 +7,16 @@ bug-reports: "https://github.com/xapi-project/xen-api" build: [["dune" "build" "-p" name "-j" jobs]] run-test: [[ "dune" "runtest" "-p" name "-j" jobs ]] depends: [ + "dune" {>= "3.15"} "cmdliner" + "alcotest" {with-test} "cohttp-lwt" + "fmt" {with-test} + "lwt" {with-test} "message-switch-lwt" + "rpclib" "rpclib-lwt" + "uri" {with-test} "xapi-idl" "xen-api-client-lwt" "alcotest-lwt" {with-test} diff --git a/vhd-format-lwt.opam b/vhd-format-lwt.opam index e89b1cfdc7c..b2140a2d07e 100644 --- a/vhd-format-lwt.opam +++ b/vhd-format-lwt.opam @@ -20,9 +20,13 @@ depends: [ "ocaml" {>= "4.02.3" & < "5.0.0"} "alcotest" {with-test} "alcotest-lwt" {with-test} + "bigarray-compat" "cstruct" {< "6.1.0"} + "cstruct-lwt" + "fmt" {with-test} "lwt" {>= "3.2.0"} "mirage-block" {>= "2.0.1"} + "rresult" "vhd-format" {= version} "io-page" {with-test & >= "2.4.0"} "odoc" {with-doc} diff --git a/vhd-format.opam b/vhd-format.opam index 896d90139a9..59c7d8122a8 100644 --- a/vhd-format.opam +++ b/vhd-format.opam @@ -18,7 +18,9 @@ doc: "https://mirage.github.io/ocaml-vhd/" bug-reports: "https://github.com/mirage/ocaml-vhd/issues" depends: [ "ocaml" {>= "4.03.0"} + "bigarray-compat" "cstruct" {>= "1.9" & < "6.1.0"} + "dune" {>= "3.15"} "io-page" "rresult" {>= "0.3.0"} "uuidm" {>= "0.9.6"} diff --git a/vhd-format.opam.template b/vhd-format.opam.template index 77a5c6ad585..382124b10dd 100644 --- a/vhd-format.opam.template +++ b/vhd-format.opam.template @@ -16,7 +16,9 @@ doc: "https://mirage.github.io/ocaml-vhd/" bug-reports: "https://github.com/mirage/ocaml-vhd/issues" depends: [ "ocaml" {>= "4.03.0"} + "bigarray-compat" "cstruct" {>= "1.9" & < "6.1.0"} 
+ "dune" {>= "3.15"} "io-page" "rresult" {>= "0.3.0"} "uuidm" {>= "0.9.6"} diff --git a/vhd-tool.opam b/vhd-tool.opam index f0135ab7a41..14f0c3c30c3 100644 --- a/vhd-tool.opam +++ b/vhd-tool.opam @@ -10,6 +10,10 @@ bug-reports: "https://github.com/xapi-project/xen-api/issues" depends: [ "dune" {>= "3.15"} "alcotest-lwt" {with-test} + "astring" + "bigarray-compat" + "cmdliner" + "cohttp" "cohttp-lwt" "conf-libssl" "cstruct" {>= "3.0.0"} @@ -17,13 +21,18 @@ depends: [ "forkexec" {= version} "io-page" "lwt" + "lwt_ssl" + "nbd" "nbd-unix" "ppx_cstruct" "ppx_deriving_rpc" "re" + "result" "rpclib" + "ssl" "sha" "tar" + "uri" "vhd-format" {= version} "vhd-format-lwt" {= version} "xapi-idl" {= version} diff --git a/xapi-cli-protocol.opam b/xapi-cli-protocol.opam index ba721dfa943..31150003aa5 100644 --- a/xapi-cli-protocol.opam +++ b/xapi-cli-protocol.opam @@ -12,8 +12,9 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "base-threads" + "alcotest" {with-test} "xapi-consts" "xapi-datamodel" "xapi-stdext-std" diff --git a/xapi-cli-protocol.opam.template b/xapi-cli-protocol.opam.template index 65ba997bf48..6234f36c294 100644 --- a/xapi-cli-protocol.opam.template +++ b/xapi-cli-protocol.opam.template @@ -10,8 +10,9 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "base-threads" + "alcotest" {with-test} "xapi-consts" "xapi-datamodel" "xapi-stdext-std" diff --git a/xapi-client.opam b/xapi-client.opam index e440122eba8..9d54de2cf11 100644 --- a/xapi-client.opam +++ b/xapi-client.opam @@ -12,10 +12,11 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "mtime" "sexpr" "base-threads" + "rpclib" "uuid" "xapi-consts" "xapi-datamodel" diff --git a/xapi-client.opam.template b/xapi-client.opam.template index 090922e0c00..2844dc8a60b 100644 --- a/xapi-client.opam.template +++ b/xapi-client.opam.template @@ -10,10 +10,11 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "mtime" "sexpr" "base-threads" + "rpclib" "uuid" "xapi-consts" "xapi-datamodel" diff --git a/xapi-compression.opam b/xapi-compression.opam index 5395517c034..a6db319460b 100644 --- a/xapi-compression.opam +++ b/xapi-compression.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "forkexec" "safe-resources" "xapi-log" diff --git a/xapi-compression.opam.template b/xapi-compression.opam.template index 6947af885ac..437d84b2e3c 100644 --- a/xapi-compression.opam.template +++ b/xapi-compression.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "forkexec" "safe-resources" "xapi-log" diff --git a/xapi-consts.opam b/xapi-consts.opam index 506569a982f..2b4726399e5 100644 --- a/xapi-consts.opam +++ b/xapi-consts.opam @@ -12,7 +12,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "dune-build-info" "xapi-inventory" ] diff --git a/xapi-consts.opam.template b/xapi-consts.opam.template index 90271150f6a..4d7ad8652db 100644 --- a/xapi-consts.opam.template +++ b/xapi-consts.opam.template @@ -10,7 +10,7 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "dune-build-info" "xapi-inventory" ] diff --git a/xapi-datamodel.opam b/xapi-datamodel.opam index d31a2178b78..4bc3b8ab90f 100644 --- a/xapi-datamodel.opam +++ b/xapi-datamodel.opam @@ -12,11 +12,13 @@ build: [ ] depends: [ "ocaml" 
- "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "mustache" "ppx_deriving_rpc" "rpclib" "base-threads" + "sexplib0" + "xapi-backtrace" "xapi-consts" "xapi-schema" "xapi-stdext-date" diff --git a/xapi-datamodel.opam.template b/xapi-datamodel.opam.template index b3ee146ed81..aa34087cffc 100644 --- a/xapi-datamodel.opam.template +++ b/xapi-datamodel.opam.template @@ -10,11 +10,13 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "mustache" "ppx_deriving_rpc" "rpclib" "base-threads" + "sexplib0" + "xapi-backtrace" "xapi-consts" "xapi-schema" "xapi-stdext-date" diff --git a/xapi-expiry-alerts.opam b/xapi-expiry-alerts.opam index 178652b00dc..a9dea20e278 100644 --- a/xapi-expiry-alerts.opam +++ b/xapi-expiry-alerts.opam @@ -16,7 +16,7 @@ bug-reports: "https://github.com/xapi-project/xen-api/issues" depends: [ "alcotest" {with-test} "ocaml" - "dune" + "dune" {>= "3.15"} "astring" "xapi-client" "xapi-consts" diff --git a/xapi-expiry-alerts.opam.template b/xapi-expiry-alerts.opam.template index f952588f237..e5f08f213d0 100644 --- a/xapi-expiry-alerts.opam.template +++ b/xapi-expiry-alerts.opam.template @@ -14,7 +14,7 @@ bug-reports: "https://github.com/xapi-project/xen-api/issues" depends: [ "alcotest" {with-test} "ocaml" - "dune" + "dune" {>= "3.15"} "astring" "xapi-client" "xapi-consts" diff --git a/xapi-idl.opam b/xapi-idl.opam index afe181351fd..c1fff027077 100644 --- a/xapi-idl.opam +++ b/xapi-idl.opam @@ -11,7 +11,7 @@ build: [["dune" "build" "-p" name "-j" jobs]] run-test: [[ "dune" "runtest" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "alcotest" {with-test} "fmt" {with-test} "astring" @@ -27,13 +27,19 @@ depends: [ "message-switch-unix" "mtime" "ppx_deriving_rpc" + "ppx_deriving" "ppx_sexp_conv" "re" - "xapi-rrd" + "result" + "rpclib" + "rresult" "sexplib" + "sexplib0" "uri" + "uuidm" "xapi-backtrace" "xapi-open-uri" + "xapi-rrd" "xapi-stdext-date" "xapi-stdext-pervasives" "xapi-stdext-std" diff --git a/xapi-idl.opam.template b/xapi-idl.opam.template index 02a5c85a08f..beea3845af6 100644 --- a/xapi-idl.opam.template +++ b/xapi-idl.opam.template @@ -9,7 +9,7 @@ build: [["dune" "build" "-p" name "-j" jobs]] run-test: [[ "dune" "runtest" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "alcotest" {with-test} "fmt" {with-test} "astring" @@ -25,13 +25,19 @@ depends: [ "message-switch-unix" "mtime" "ppx_deriving_rpc" + "ppx_deriving" "ppx_sexp_conv" "re" - "xapi-rrd" + "result" + "rpclib" + "rresult" "sexplib" + "sexplib0" "uri" + "uuidm" "xapi-backtrace" "xapi-open-uri" + "xapi-rrd" "xapi-stdext-date" "xapi-stdext-pervasives" "xapi-stdext-std" diff --git a/xapi-inventory.opam b/xapi-inventory.opam index 3783ff02467..c54eaf68746 100644 --- a/xapi-inventory.opam +++ b/xapi-inventory.opam @@ -16,7 +16,7 @@ build: [ depends: [ "ocaml" "ocamlfind" {build} - "dune" {build} + "dune" {>= "3.15"} "base-threads" "astring" "xapi-stdext-unix" diff --git a/xapi-inventory.opam.template b/xapi-inventory.opam.template index 7d6338dc108..f9504007f19 100644 --- a/xapi-inventory.opam.template +++ b/xapi-inventory.opam.template @@ -14,7 +14,7 @@ build: [ depends: [ "ocaml" "ocamlfind" {build} - "dune" {build} + "dune" {>= "3.15"} "base-threads" "astring" "xapi-stdext-unix" diff --git a/xapi-log.opam b/xapi-log.opam index 416fb3894b4..d83f9bec7c6 100644 --- a/xapi-log.opam +++ b/xapi-log.opam @@ -13,7 +13,12 @@ build: [ available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "astring" + "fmt" + "logs" + "mtime" 
+ "xapi-backtrace" "xapi-stdext-pervasives" ] synopsis: "Library required by xapi" diff --git a/xapi-log.opam.template b/xapi-log.opam.template index 502e26940cf..00b5cce6fd5 100644 --- a/xapi-log.opam.template +++ b/xapi-log.opam.template @@ -11,7 +11,12 @@ build: [ available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "astring" + "fmt" + "logs" + "mtime" + "xapi-backtrace" "xapi-stdext-pervasives" ] synopsis: "Library required by xapi" diff --git a/xapi-nbd.opam b/xapi-nbd.opam index b42a11f00e0..da583e6cbd8 100644 --- a/xapi-nbd.opam +++ b/xapi-nbd.opam @@ -12,11 +12,11 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "alcotest" {with-test} "alcotest-lwt" {with-test} "cmdliner" - "lwt" {>= "3.0.0"} + "lwt" {>= "3.0.0" & with-test} "lwt_log" "mirage-block-unix" "nbd-unix" diff --git a/xapi-nbd.opam.template b/xapi-nbd.opam.template index ef77689eecd..8e3b5c0dd40 100644 --- a/xapi-nbd.opam.template +++ b/xapi-nbd.opam.template @@ -10,11 +10,11 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "alcotest" {with-test} "alcotest-lwt" {with-test} "cmdliner" - "lwt" {>= "3.0.0"} + "lwt" {>= "3.0.0" & with-test} "lwt_log" "mirage-block-unix" "nbd-unix" diff --git a/xapi-networkd.opam b/xapi-networkd.opam index ef37bd16486..a7df883bc7d 100644 --- a/xapi-networkd.opam +++ b/xapi-networkd.opam @@ -13,10 +13,14 @@ depends: [ "base-threads" "forkexec" {= version} "http-lib" {= version} + "integers" "mtime" "netlink" "re" + "result" + "rresult" "rpclib" + "uri" "xapi-idl" {= version} "xapi-inventory" "xapi-stdext-pervasives" {= version} @@ -25,6 +29,7 @@ depends: [ "xapi-stdext-unix" {= version} "xapi-test-utils" "xen-api-client" {= version} + "yojson" "odoc" {with-doc} ] build: [ diff --git a/xapi-open-uri.opam b/xapi-open-uri.opam index 31da3b42244..bb080d75499 100644 --- a/xapi-open-uri.opam +++ b/xapi-open-uri.opam @@ -14,9 +14,10 @@ available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" "cohttp" - "dune" + "dune" {>= "3.15"} "stunnel" "uri" + "xapi-backtrace" "xapi-stdext-pervasives" ] synopsis: "Library required by xapi" diff --git a/xapi-open-uri.opam.template b/xapi-open-uri.opam.template index 1542395adc5..4e3ec18d413 100644 --- a/xapi-open-uri.opam.template +++ b/xapi-open-uri.opam.template @@ -12,9 +12,10 @@ available: [ os = "linux" | os = "macos" ] depends: [ "ocaml" "cohttp" - "dune" + "dune" {>= "3.15"} "stunnel" "uri" + "xapi-backtrace" "xapi-stdext-pervasives" ] synopsis: "Library required by xapi" diff --git a/xapi-rrd.opam b/xapi-rrd.opam index abc1e4bb28c..3c5613224fb 100644 --- a/xapi-rrd.opam +++ b/xapi-rrd.opam @@ -16,7 +16,7 @@ build: [ ] depends: [ "ocaml" {>= "4.14"} - "dune" {>= "2.0.0"} + "dune" {>= "3.15"} "base-bigarray" "base-unix" "ppx_deriving_rpc" {>= "6.1.0"} diff --git a/xapi-rrd.opam.template b/xapi-rrd.opam.template index 8185db9f7aa..4397c184eb5 100644 --- a/xapi-rrd.opam.template +++ b/xapi-rrd.opam.template @@ -14,7 +14,7 @@ build: [ ] depends: [ "ocaml" {>= "4.14"} - "dune" {>= "2.0.0"} + "dune" {>= "3.15"} "base-bigarray" "base-unix" "ppx_deriving_rpc" {>= "6.1.0"} diff --git a/xapi-rrdd-plugin.opam b/xapi-rrdd-plugin.opam index b01d85a6da5..e102355bf9c 100644 --- a/xapi-rrdd-plugin.opam +++ b/xapi-rrdd-plugin.opam @@ -20,7 +20,7 @@ authors: "xen-api@lists.xen.org" homepage: "https://xapi-project.github.io/" bug-reports: "https://github.com/xapi-project/xen-api/issues" dev-repo: "git+https://github.com/xapi-project/xen-api.git" -depends: ["ocaml" "rrdd-plugin"] +depends: ["ocaml" "rrdd-plugin" 
"xenstore" "xenstore_transport" "dune" {>= "3.15"}] synopsis: "A plugin library for the xapi performance monitoring daemon" description: """ This library allows one to expose a datasource which can then be diff --git a/xapi-rrdd-plugin.opam.template b/xapi-rrdd-plugin.opam.template index 432db33bc02..0eaa9df6f8f 100644 --- a/xapi-rrdd-plugin.opam.template +++ b/xapi-rrdd-plugin.opam.template @@ -4,7 +4,7 @@ authors: "xen-api@lists.xen.org" homepage: "https://xapi-project.github.io/" bug-reports: "https://github.com/xapi-project/xen-api/issues" dev-repo: "git+https://github.com/xapi-project/xen-api.git" -depends: ["ocaml" "rrdd-plugin"] +depends: ["ocaml" "rrdd-plugin" "xenstore" "xenstore_transport" "dune" {>= "3.15"}] synopsis: "A plugin library for the xapi performance monitoring daemon" description: """ This library allows one to expose a datasource which can then be diff --git a/xapi-rrdd.opam b/xapi-rrdd.opam index 89b2d827a69..62d448f2869 100644 --- a/xapi-rrdd.opam +++ b/xapi-rrdd.opam @@ -13,13 +13,18 @@ depends: [ "ocaml" {>= "4.02.0"} "alcotest" {with-test} "astring" + "cmdliner" + "fmt" {with-test} "gzip" {= version} "http-lib" {= version} "inotify" "io-page" + "ipaddr" "mtime" + "polly" "ppx_deriving_rpc" "rpclib" + "uri" "ezxenstore" {= version} "uuid" {= version} "xapi-backtrace" @@ -28,6 +33,11 @@ depends: [ "xapi-stdext-threads" {= version} "xapi-stdext-unix" {= version} "xapi-tracing" + "xenctrl" + "xenstore" + "xenstore_transport" + "xmlm" + "yojson" "odoc" {with-doc} ] build: [ diff --git a/xapi-schema.opam b/xapi-schema.opam index f4303e871a2..9a3b702fcd0 100644 --- a/xapi-schema.opam +++ b/xapi-schema.opam @@ -12,7 +12,7 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "ppx_sexp_conv" "sexpr" "xapi-log" diff --git a/xapi-schema.opam.template b/xapi-schema.opam.template index 60e1dc71ad9..f6b9f276789 100644 --- a/xapi-schema.opam.template +++ b/xapi-schema.opam.template @@ -10,7 +10,7 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "ppx_sexp_conv" "sexpr" "xapi-log" diff --git a/xapi-sdk.opam b/xapi-sdk.opam index b09d4c60808..8adccdf2932 100644 --- a/xapi-sdk.opam +++ b/xapi-sdk.opam @@ -10,9 +10,11 @@ depends: [ "dune" {>= "3.15"} "alcotest" {with-test} "astring" + "fmt" {with-test} "mustache" "xapi-datamodel" {= version} "xapi-stdext-unix" {= version & with-test} + "xapi-test-utils" {with-test} "odoc" {with-doc} ] build: [ diff --git a/xapi-squeezed.opam b/xapi-squeezed.opam index 52dd6fdc3dc..7b2de24dd18 100644 --- a/xapi-squeezed.opam +++ b/xapi-squeezed.opam @@ -12,9 +12,10 @@ build: [ ] depends: [ "ocaml" + "alcotest" {with-test} "astring" "cohttp" {>= "0.11.0"} - "dune" + "dune" {>= "3.15"} "re" "rpclib" "uri" diff --git a/xapi-squeezed.opam.template b/xapi-squeezed.opam.template index 84ad0840a82..9641c69858a 100644 --- a/xapi-squeezed.opam.template +++ b/xapi-squeezed.opam.template @@ -9,9 +9,10 @@ build: [ ] depends: [ "ocaml" + "alcotest" {with-test} "astring" "cohttp" {>= "0.11.0"} - "dune" + "dune" {>= "3.15"} "re" "rpclib" "uri" diff --git a/xapi-stdext-threads.opam b/xapi-stdext-threads.opam index eba91836d0f..ae64e906b29 100644 --- a/xapi-stdext-threads.opam +++ b/xapi-stdext-threads.opam @@ -10,6 +10,8 @@ depends: [ "dune" {>= "3.15"} "base-threads" "base-unix" + "alcotest" {with-test} + "fmt" {with-test} "odoc" {with-doc} "xapi-stdext-pervasives" {= version} "mtime" {with-test} diff --git a/xapi-stdext-unix.opam b/xapi-stdext-unix.opam index 4daa2eb9326..41760ac6a8e 100644 --- a/xapi-stdext-unix.opam +++ 
b/xapi-stdext-unix.opam @@ -10,10 +10,12 @@ depends: [ "dune" {>= "3.15"} "ocaml" {>= "4.12.0"} "alcotest" {with-test} + "astring" "base-unix" "bisect_ppx" {with-test} "fd-send-recv" {>= "2.0.0"} "fmt" + "integers" "mtime" {>= "2.0.0" & with-test} "logs" {with-test} "qcheck-core" {>= "0.21.2" & with-test} diff --git a/xapi-storage-cli.opam b/xapi-storage-cli.opam index 4b9314babe8..c58a06832eb 100644 --- a/xapi-storage-cli.opam +++ b/xapi-storage-cli.opam @@ -11,7 +11,7 @@ dev-repo: "git+https://github.com/xapi-project/xen-api.git" build: [[ "dune" "build" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "base-threads" "re" "rpclib" diff --git a/xapi-storage-cli.opam.template b/xapi-storage-cli.opam.template index b8201d62b34..3ffbe86d8a3 100644 --- a/xapi-storage-cli.opam.template +++ b/xapi-storage-cli.opam.template @@ -9,7 +9,7 @@ dev-repo: "git+https://github.com/xapi-project/xen-api.git" build: [[ "dune" "build" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "base-threads" "re" "rpclib" diff --git a/xapi-storage-script.opam b/xapi-storage-script.opam index 87fce518a89..a8df41ef405 100644 --- a/xapi-storage-script.opam +++ b/xapi-storage-script.opam @@ -12,7 +12,7 @@ tags: [ "org:xapi-project" ] build: [[ "dune" "build" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "conf-python-3" {with-test} "xapi-idl" {>= "0.10.0"} "xapi-storage" diff --git a/xapi-storage-script.opam.template b/xapi-storage-script.opam.template index 01f859d7b36..b40cc0880b5 100644 --- a/xapi-storage-script.opam.template +++ b/xapi-storage-script.opam.template @@ -10,7 +10,7 @@ tags: [ "org:xapi-project" ] build: [[ "dune" "build" "-p" name "-j" jobs ]] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "conf-python-3" {with-test} "xapi-idl" {>= "0.10.0"} "xapi-storage" diff --git a/xapi-storage.opam b/xapi-storage.opam index c6d5ae2a086..f71b424c430 100644 --- a/xapi-storage.opam +++ b/xapi-storage.opam @@ -12,11 +12,13 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "conf-python-3" "alcotest" {with-test} "lwt" {with-test} "rpclib" {with-test} + "result" + "rresult" "ppx_deriving_rpc" "rpclib" "xmlm" diff --git a/xapi-storage.opam.template b/xapi-storage.opam.template index 91a35266e5e..779e459a78c 100644 --- a/xapi-storage.opam.template +++ b/xapi-storage.opam.template @@ -10,11 +10,13 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "conf-python-3" "alcotest" {with-test} "lwt" {with-test} "rpclib" {with-test} + "result" + "rresult" "ppx_deriving_rpc" "rpclib" "xmlm" diff --git a/xapi-tracing-export.opam b/xapi-tracing-export.opam index fb00c67bc06..e17845a1d0f 100644 --- a/xapi-tracing-export.opam +++ b/xapi-tracing-export.opam @@ -13,8 +13,12 @@ depends: [ "cohttp-posix" "dune" {>= "3.15"} "cohttp" + "ptime" + "result" + "rresult" "rpclib" "ppx_deriving_rpc" + "uri" "xapi-log" {= version} "xapi-open-uri" {= version} "xapi-stdext-threads" {= version} diff --git a/xapi-tracing.opam b/xapi-tracing.opam index a2ae1016cea..b9cac8ba0dd 100644 --- a/xapi-tracing.opam +++ b/xapi-tracing.opam @@ -12,6 +12,7 @@ depends: [ "ocaml" "dune" {>= "3.15"} "alcotest" {with-test} + "fmt" {with-test} "re" "uri" "uuid" {with-test} diff --git a/xapi-types.opam b/xapi-types.opam index 9f69f9d3983..a62e4c8fca3 100644 --- a/xapi-types.opam +++ b/xapi-types.opam @@ -13,7 +13,7 @@ build: [ depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "astring" "ppx_deriving_rpc" "rpclib" diff --git a/xapi-types.opam.template 
b/xapi-types.opam.template index c3a998e5004..41e667d7fa2 100644 --- a/xapi-types.opam.template +++ b/xapi-types.opam.template @@ -11,7 +11,7 @@ build: [ depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} "astring" "ppx_deriving_rpc" "rpclib" diff --git a/xapi-xenopsd-cli.opam b/xapi-xenopsd-cli.opam index ee20d166b3b..dfd3eab41f8 100644 --- a/xapi-xenopsd-cli.opam +++ b/xapi-xenopsd-cli.opam @@ -12,15 +12,20 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "astring" "base-threads" "cmdliner" + "ppx_deriving_rpc" "re" + "result" "rpclib" "rresult" "uuid" + "uuidm" "xapi-idl" "xenstore_transport" {with-test} + "yojson" ] synopsis: "A simple command-line tool for interacting with xenopsd" description: """ diff --git a/xapi-xenopsd-cli.opam.template b/xapi-xenopsd-cli.opam.template index f5166466189..da363888e1b 100644 --- a/xapi-xenopsd-cli.opam.template +++ b/xapi-xenopsd-cli.opam.template @@ -10,15 +10,20 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} + "astring" "base-threads" "cmdliner" + "ppx_deriving_rpc" "re" + "result" "rpclib" "rresult" "uuid" + "uuidm" "xapi-idl" "xenstore_transport" {with-test} + "yojson" ] synopsis: "A simple command-line tool for interacting with xenopsd" description: """ diff --git a/xapi-xenopsd-simulator.opam b/xapi-xenopsd-simulator.opam index 1ad22ebd290..45cffcfc82a 100644 --- a/xapi-xenopsd-simulator.opam +++ b/xapi-xenopsd-simulator.opam @@ -13,7 +13,7 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "base-unix" "xapi-xenopsd" ] diff --git a/xapi-xenopsd-simulator.opam.template b/xapi-xenopsd-simulator.opam.template index af6746862bd..b23fb7ea5fa 100644 --- a/xapi-xenopsd-simulator.opam.template +++ b/xapi-xenopsd-simulator.opam.template @@ -11,7 +11,7 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "base-unix" "xapi-xenopsd" ] diff --git a/xapi-xenopsd-xc.opam b/xapi-xenopsd-xc.opam index 9a355cd3fb4..53eb84adf96 100644 --- a/xapi-xenopsd-xc.opam +++ b/xapi-xenopsd-xc.opam @@ -14,15 +14,19 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "astring" "base-threads" "base-unix" + "base64" + "bos" + "cmdliner" "conf-xen" "ezxenstore" "fd-send-recv" "fmt" "forkexec" + "inotify" "mtime" "polly" "ppx_deriving_rpc" @@ -33,7 +37,10 @@ depends: [ "rpclib" "rresult" "sexplib0" + "uri" "uuid" + "uuidm" + "uutf" "xapi-backtrace" "xapi-idl" "xapi-rrd" @@ -46,6 +53,7 @@ depends: [ "xenctrl" "xenstore" "xenstore_transport" + "xenmmap" ] synopsis: "A xenops plugin which knows how to use xenstore, xenctrl and xenguest to manage" diff --git a/xapi-xenopsd-xc.opam.template b/xapi-xenopsd-xc.opam.template index a0490712875..ef97615d427 100644 --- a/xapi-xenopsd-xc.opam.template +++ b/xapi-xenopsd-xc.opam.template @@ -12,15 +12,19 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "astring" "base-threads" "base-unix" + "base64" + "bos" + "cmdliner" "conf-xen" "ezxenstore" "fd-send-recv" "fmt" "forkexec" + "inotify" "mtime" "polly" "ppx_deriving_rpc" @@ -31,7 +35,10 @@ depends: [ "rpclib" "rresult" "sexplib0" + "uri" "uuid" + "uuidm" + "uutf" "xapi-backtrace" "xapi-idl" "xapi-rrd" @@ -44,6 +51,7 @@ depends: [ "xenctrl" "xenstore" "xenstore_transport" + "xenmmap" ] synopsis: "A xenops plugin which knows how to use xenstore, xenctrl and xenguest to manage" diff --git a/xapi-xenopsd.opam b/xapi-xenopsd.opam index c5f5c34474c..f255a13cc30 100644 --- a/xapi-xenopsd.opam +++ b/xapi-xenopsd.opam @@ -14,7 +14,7 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} 
"base-threads" "alcotest" {with-test} "astring" @@ -32,6 +32,7 @@ depends: [ "sexplib0" "uri" "uuid" + "uuidm" "uutf" "xapi-backtrace" "xapi-idl" @@ -39,6 +40,7 @@ depends: [ "xapi-stdext-pervasives" "xapi-stdext-threads" "xapi-stdext-unix" + "xapi-test-utils" {with-test} "xapi-tracing" "xapi-tracing-export" "xenstore_transport" {with-test} diff --git a/xapi-xenopsd.opam.template b/xapi-xenopsd.opam.template index 39b101a724e..93961549f71 100644 --- a/xapi-xenopsd.opam.template +++ b/xapi-xenopsd.opam.template @@ -12,7 +12,7 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "base-threads" "alcotest" {with-test} "astring" @@ -30,6 +30,7 @@ depends: [ "sexplib0" "uri" "uuid" + "uuidm" "uutf" "xapi-backtrace" "xapi-idl" @@ -37,6 +38,7 @@ depends: [ "xapi-stdext-pervasives" "xapi-stdext-threads" "xapi-stdext-unix" + "xapi-test-utils" {with-test} "xapi-tracing" "xapi-tracing-export" "xenstore_transport" {with-test} diff --git a/xapi.opam b/xapi.opam index 16dcc46d2b4..4179ebc22ed 100644 --- a/xapi.opam +++ b/xapi.opam @@ -12,19 +12,27 @@ depends: [ "dune" {>= "3.15"} "alcotest" "angstrom" + "astring" "base-threads" "base64" + "bos" {with-test} "cdrom" + "cmdliner" + "cohttp" "conf-pam" "crowbar" {with-test} + "cstruct" "ctypes" "ctypes-foreign" "domain-name" "ezxenstore" {= version} - "fmt" {with-test} + "fmt" "hex" "http-lib" {with-test & = version} + "integers" "ipaddr" + "logs" + "magic-mime" "mirage-crypto" "mirage-crypto-pk" "mirage-crypto-rng" {>= "0.11.0"} @@ -33,21 +41,31 @@ depends: [ "opentelemetry-client-ocurl" "pci" "pciutil" {= version} + "polly" "ppx_deriving_rpc" "ppx_sexp_conv" "ppx_deriving" "psq" + "ptime" "qcheck-alcotest" + "qcheck-core" + "re" + "result" "rpclib" "rrdd-plugin" {= version} "rresult" "sexpr" + "sexplib" + "sexplib0" "sha" "stunnel" {= version} "tar" "tar-unix" + "uri" "uuid" {= version} + "uuidm" "x509" + "xapi-backtrace" "xapi-client" {= version} "xapi-cli-protocol" {= version} "xapi-consts" {= version} @@ -66,6 +84,9 @@ depends: [ "xapi-tracing" {= version} "xapi-types" {= version} "xapi-xenopsd" {= version} + "xenctrl" + "xenstore_transport" + "xmlm" "xml-light2" {= version} "yojson" "zstd" {= version} diff --git a/xe.opam b/xe.opam index eb83012f600..0e3953ccd29 100644 --- a/xe.opam +++ b/xe.opam @@ -12,16 +12,20 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} + "astring" + "base-threads" "fpath" "stunnel" - "base-threads" + "uri" + "xapi-backtrace" "xapi-cli-protocol" "xapi-consts" "xapi-datamodel" "xapi-stdext-pervasives" "xapi-stdext-std" "xapi-stdext-unix" + "yojson" ] synopsis: "The xapi toolstack daemon which implements the XenAPI" description: """ diff --git a/xe.opam.template b/xe.opam.template index 8884529da4d..fb95826fa60 100644 --- a/xe.opam.template +++ b/xe.opam.template @@ -10,16 +10,20 @@ build: [ ] depends: [ "ocaml" - "dune" {build & >= "1.4"} + "dune" {>= "3.15"} + "astring" + "base-threads" "fpath" "stunnel" - "base-threads" + "uri" + "xapi-backtrace" "xapi-cli-protocol" "xapi-consts" "xapi-datamodel" "xapi-stdext-pervasives" "xapi-stdext-std" "xapi-stdext-unix" + "yojson" ] synopsis: "The xapi toolstack daemon which implements the XenAPI" description: """ diff --git a/xen-api-client-async.opam b/xen-api-client-async.opam index c53b756b7c8..c283cb6d1e8 100644 --- a/xen-api-client-async.opam +++ b/xen-api-client-async.opam @@ -16,12 +16,16 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "async" {>= "v0.9.0"} + "async_kernel" "async_unix" + "base" "base-threads" "cohttp" {>= 
"0.22.0"} "core" + "core_kernel" + "core_unix" "rpclib" "uri" "xen-api-client" diff --git a/xen-api-client-async.opam.template b/xen-api-client-async.opam.template index 6aa8a312052..8224d441c1d 100644 --- a/xen-api-client-async.opam.template +++ b/xen-api-client-async.opam.template @@ -14,12 +14,16 @@ build: [ ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "async" {>= "v0.9.0"} + "async_kernel" "async_unix" + "base" "base-threads" "cohttp" {>= "0.22.0"} "core" + "core_kernel" + "core_unix" "rpclib" "uri" "xen-api-client" diff --git a/xen-api-client-lwt.opam b/xen-api-client-lwt.opam index 3ac1592eca0..d1c25f04f3a 100644 --- a/xen-api-client-lwt.opam +++ b/xen-api-client-lwt.opam @@ -16,7 +16,9 @@ build: [ ] depends: [ "ocaml" - "dune" {>= "1.4"} + "dune" {>= "3.15"} + "astring" + "bigarray-compat" "cohttp" {>= "0.22.0"} "cohttp-lwt-unix" "cstruct" {>= "1.0.1"} @@ -24,6 +26,7 @@ depends: [ "lwt_ssl" "re" "rpclib" + "ssl" "uri" "xen-api-client" "xmlm" diff --git a/xen-api-client-lwt.opam.template b/xen-api-client-lwt.opam.template index 81633c40c2e..20b7069791c 100644 --- a/xen-api-client-lwt.opam.template +++ b/xen-api-client-lwt.opam.template @@ -14,7 +14,9 @@ build: [ ] depends: [ "ocaml" - "dune" {>= "1.4"} + "dune" {>= "3.15"} + "astring" + "bigarray-compat" "cohttp" {>= "0.22.0"} "cohttp-lwt-unix" "cstruct" {>= "1.0.1"} @@ -22,6 +24,7 @@ depends: [ "lwt_ssl" "re" "rpclib" + "ssl" "uri" "xen-api-client" "xmlm" diff --git a/xml-light2.opam b/xml-light2.opam index da5264648de..5d2cadac09c 100644 --- a/xml-light2.opam +++ b/xml-light2.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xmlm" ] synopsis: "Library required by xapi" diff --git a/xml-light2.opam.template b/xml-light2.opam.template index 1c6db3e0ca1..04fabda6a1a 100644 --- a/xml-light2.opam.template +++ b/xml-light2.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xmlm" ] synopsis: "Library required by xapi" diff --git a/zstd.opam b/zstd.opam index 59901c80ee6..7a04554f2a9 100644 --- a/zstd.opam +++ b/zstd.opam @@ -11,7 +11,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xapi-compression" ] synopsis: "Library required by xapi" diff --git a/zstd.opam.template b/zstd.opam.template index 8e7be0f3783..7c960776d88 100644 --- a/zstd.opam.template +++ b/zstd.opam.template @@ -9,7 +9,7 @@ build: [[ "dune" "build" "-p" name "-j" jobs ]] available: [ os = "linux" ] depends: [ "ocaml" - "dune" + "dune" {>= "3.15"} "xapi-compression" ] synopsis: "Library required by xapi" From cee8ed67dc3960680b3226168dfab6f5608dd4f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 4 Sep 2024 18:14:07 +0100 Subject: [PATCH 012/141] CP-51479: [maintenance]: make install.sh more quiet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `set -x` is only needed when debugging the script. Don't flood 'make install' output. 
Signed-off-by: Edwin Török --- scripts/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/install.sh b/scripts/install.sh index bd3afe9a665..4b0dc0f8dd6 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -18,7 +18,7 @@ # @LIBEXECDIR@ # @SCRIPTSDIR@ -set -x +#set -x MODE=${1} NUM_FILES=$(($#-2)) From f465a5550a964f872e32800fb5bbf7651e56b692 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 4 Sep 2024 18:17:44 +0100 Subject: [PATCH 013/141] CP-51479: [maintenance]: running make install twice in a row should succeed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously it failed because the symlink already existed. Signed-off-by: Edwin Török --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7d56b541300..be3cb1a05bf 100644 --- a/Makefile +++ b/Makefile @@ -239,7 +239,7 @@ install: build doc sdk doc-json install -D ./ocaml/xenopsd/scripts/tap $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/tap install -D ./ocaml/xenopsd/scripts/setup-vif-rules $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/setup-vif-rules install -D ./_build/install/default/bin/pvs-proxy-ovs-setup $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/pvs-proxy-ovs-setup - (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -s pvs-proxy-ovs-setup setup-pvs-proxy-rules) + (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -sf pvs-proxy-ovs-setup setup-pvs-proxy-rules) install -D ./ocaml/xenopsd/scripts/common.py $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/common.py install -D ./ocaml/xenopsd/scripts/igmp_query_injector.py $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/igmp_query_injector.py install -D ./ocaml/xenopsd/scripts/qemu-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/qemu-wrapper From 735338edaed2fac474bc4be80a2ec781e7074952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 13 Sep 2024 14:59:27 +0100 Subject: [PATCH 014/141] CP-51479: [maintenance]: ocaml/sdk-gen should create directories before writing files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When run inside Dune's sandbox you cannot rely on another rule having created the empty directories for you. Ensure that we create parent directories before we write a new file, otherwise we fail with a `Sys_error` about `No such file or directory`. 
Signed-off-by: Edwin Török --- ocaml/sdk-gen/common/CommonFunctions.ml | 3 +++ ocaml/sdk-gen/common/dune | 1 + ocaml/sdk-gen/java/dune | 1 + ocaml/sdk-gen/java/main.ml | 4 +++- 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ocaml/sdk-gen/common/CommonFunctions.ml b/ocaml/sdk-gen/common/CommonFunctions.ml index 5f1b5b3a560..e8004e140cf 100644 --- a/ocaml/sdk-gen/common/CommonFunctions.ml +++ b/ocaml/sdk-gen/common/CommonFunctions.ml @@ -32,6 +32,7 @@ let string_of_file filename = ~finally:(fun () -> close_in in_channel) let with_output filename f = + Xapi_stdext_unix.Unixext.mkdir_rec (Filename.dirname filename) 0o755 ; let io = open_out filename in Fun.protect (fun () -> f io) ~finally:(fun () -> close_out io) @@ -264,6 +265,7 @@ and get_published_info_field field cls = and render_template template_file json output_file = let templ = string_of_file template_file |> Mustache.of_string in let rendered = Mustache.render templ json in + Xapi_stdext_unix.Unixext.mkdir_rec (Filename.dirname output_file) 0o755 ; let out_chan = open_out output_file in Fun.protect (fun () -> output_string out_chan rendered) @@ -272,6 +274,7 @@ and render_template template_file json output_file = let render_file (infile, outfile) json templates_dir dest_dir = let input_path = Filename.concat templates_dir infile in let output_path = Filename.concat dest_dir outfile in + Xapi_stdext_unix.Unixext.mkdir_rec (Filename.dirname output_path) 0o755 ; render_template input_path json output_path let json_releases = diff --git a/ocaml/sdk-gen/common/dune b/ocaml/sdk-gen/common/dune index 777d29b16ce..ea0011e71ce 100644 --- a/ocaml/sdk-gen/common/dune +++ b/ocaml/sdk-gen/common/dune @@ -7,6 +7,7 @@ xapi-datamodel mustache xapi-stdext-std + xapi-stdext-unix ) (modules_without_implementation license) ) diff --git a/ocaml/sdk-gen/java/dune b/ocaml/sdk-gen/java/dune index a1daac834b0..e83e9fea097 100644 --- a/ocaml/sdk-gen/java/dune +++ b/ocaml/sdk-gen/java/dune @@ -8,6 +8,7 @@ mustache str xapi-datamodel + xapi-stdext-unix ) ) diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index 58254d3517b..ea70c20673c 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -817,5 +817,7 @@ let _ = populate_types types templdir class_dir ; let uncommented_license = string_of_file "LICENSE" in - let class_license = open_out "autogen/xen-api/src/main/resources/LICENSE" in + let filename = "autogen/xen-api/src/main/resources/LICENSE" in + Xapi_stdext_unix.Unixext.mkdir_rec (Filename.dirname filename) 0o755 ; + let class_license = open_out filename in output_string class_license uncommented_license From 5ae36481ca3023a72af7d92b1d734b839a1df9c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 13 Sep 2024 15:00:50 +0100 Subject: [PATCH 015/141] CP-51479: [maintenance]: add @sdkgen rule for convenience in dune MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also add an 'alias generate' for the csharp/autogen/LICENSE file. All other sdk subdirs have a 'generate' alias in 'autogen', except for 'csharp', and without it 'make sdk' would fail to build due to the missing file. 
Signed-off-by: Edwin Török --- Makefile | 13 +------------ ocaml/sdk-gen/csharp/autogen/dune | 1 + ocaml/sdk-gen/dune | 16 ++++++++++++++++ 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index be3cb1a05bf..6c7a7f8cab0 100644 --- a/Makefile +++ b/Makefile @@ -97,18 +97,7 @@ doc: sdk: cp $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml - dune build --profile=$(PROFILE) \ - ocaml/sdk-gen/c/gen_c_binding.exe \ - ocaml/sdk-gen/csharp/gen_csharp_binding.exe \ - ocaml/sdk-gen/java/main.exe \ - ocaml/sdk-gen/powershell/gen_powershell_binding.exe \ - ocaml/sdk-gen/go/gen_go_binding.exe - dune build --profile=$(PROFILE) -f\ - @ocaml/sdk-gen/c/generate \ - @ocaml/sdk-gen/csharp/generate \ - @ocaml/sdk-gen/java/generate \ - @ocaml/sdk-gen/powershell/generate \ - @ocaml/sdk-gen/go/generate + dune build --profile=$(PROFILE) -f @sdkgen rm -rf $(XAPISDK) mkdir -p $(XAPISDK)/c mkdir -p $(XAPISDK)/csharp diff --git a/ocaml/sdk-gen/csharp/autogen/dune b/ocaml/sdk-gen/csharp/autogen/dune index 61e1f86a0a4..738e07d974f 100644 --- a/ocaml/sdk-gen/csharp/autogen/dune +++ b/ocaml/sdk-gen/csharp/autogen/dune @@ -1,4 +1,5 @@ (rule + (alias generate) (targets LICENSE) (deps ../../LICENSE diff --git a/ocaml/sdk-gen/dune b/ocaml/sdk-gen/dune index 49140147129..4d10d554e4e 100644 --- a/ocaml/sdk-gen/dune +++ b/ocaml/sdk-gen/dune @@ -1 +1,17 @@ (data_only_dirs component-test) + +(alias + (name sdkgen) + (deps + c/gen_c_binding.exe + csharp/gen_csharp_binding.exe + java/main.exe + powershell/gen_powershell_binding.exe + go/gen_go_binding.exe + (alias_rec c/generate) + (alias_rec csharp/generate) + (alias_rec java/generate) + (alias_rec powershell/generate) + (alias_rec go/generate) + ) +) From 9a89dec2e145f0b9efdab1360e28047c9e413d71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 13 Sep 2024 16:12:36 +0100 Subject: [PATCH 016/141] CP-51479: [maintenance]: install SDK files using dune rules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid copying twice; get dune to install the files to the correct destination in one go. Also add a copy of XE_SR_ERRORCODES.xml; by default 'make install' would look for this in /opt/xensource, and writing there requires root. (This can be overridden with `./configure --share`). Since we are using `dune` to install the files now we need the file to always be present. Had to adjust the paths used by the CI. Uses dune directory targets, and the directory must be entirely under the control of these rules.
There are some static files in autogen/ though, so move the generated ones to autogen-out, and then use 'cp -r' to copy over the static ones (there is no builtin dune action for the copy, there are individual copy actions, or a copy_files rule, but neither is suitable here) Signed-off-by: Edwin Török --- .github/workflows/generate-and-build-sdks.yml | 14 +- .github/workflows/go-ci/action.yml | 6 +- .github/workflows/sdk-ci/action.yml | 2 +- .gitignore | 3 - Makefile | 32 +- dune | 6 + dune-project | 1 + ocaml/doc/README.md | 6 +- ocaml/sdk-gen/c/dune | 14 +- ocaml/sdk-gen/c/gen_c_binding.ml | 2 +- ocaml/sdk-gen/component-test/README.md | 2 +- ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml | 914 ++++++++++++++++++ ocaml/sdk-gen/csharp/autogen/dune | 9 +- ocaml/sdk-gen/csharp/dune | 29 +- ocaml/sdk-gen/csharp/friendly_error_names.ml | 2 +- ocaml/sdk-gen/csharp/gen_csharp_binding.ml | 2 +- ocaml/sdk-gen/dune | 1 + ocaml/sdk-gen/go/dune | 14 +- ocaml/sdk-gen/java/autogen/dune | 1 + ocaml/sdk-gen/java/dune | 13 +- ocaml/sdk-gen/java/main.ml | 6 +- ocaml/sdk-gen/powershell/autogen/dune | 5 + ocaml/sdk-gen/powershell/dune | 18 +- .../powershell/gen_powershell_binding.ml | 2 +- ocaml/sdk-gen/windows-line-endings.sh | 0 25 files changed, 1034 insertions(+), 70 deletions(-) create mode 100644 ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml mode change 100644 => 100755 ocaml/sdk-gen/windows-line-endings.sh diff --git a/.github/workflows/generate-and-build-sdks.yml b/.github/workflows/generate-and-build-sdks.yml index 9c263900f77..a439c969b50 100644 --- a/.github/workflows/generate-and-build-sdks.yml +++ b/.github/workflows/generate-and-build-sdks.yml @@ -32,34 +32,34 @@ jobs: with: name: SDK_Source_C path: | - _build/install/default/xapi/sdk/c/* - !_build/install/default/xapi/sdk/c/dune + _build/install/default/share/c/* + !_build/install/default/share/c/dune - name: Store C# SDK source uses: actions/upload-artifact@v4 with: name: SDK_Source_CSharp - path: _build/install/default/xapi/sdk/csharp/* + path: _build/install/default/share/csharp/* - name: Store PowerShell SDK source uses: actions/upload-artifact@v4 with: name: SDK_Source_PowerShell - path: _build/install/default/xapi/sdk/powershell/* + path: _build/install/default/share/powershell/* - name: Store Go SDK Artifacts uses: actions/upload-artifact@v4 with: name: SDK_Artifacts_Go path: | - _build/install/default/xapi/sdk/go/* - !_build/install/default/xapi/sdk/go/dune + _build/install/default/share/go/* + !_build/install/default/share/go/dune - name: Store Java SDK source uses: actions/upload-artifact@v4 with: name: SDK_Source_Java - path: _build/install/default/xapi/sdk/java/* + path: _build/install/default/share/java/* - name: Trim dune cache run: opam exec -- dune cache trim --size=2GiB diff --git a/.github/workflows/go-ci/action.yml b/.github/workflows/go-ci/action.yml index 6dc66224fe0..c1b2df7f1e1 100644 --- a/.github/workflows/go-ci/action.yml +++ b/.github/workflows/go-ci/action.yml @@ -11,12 +11,12 @@ runs: uses: golangci/golangci-lint-action@v4 with: version: v1.57.2 - working-directory: ${{ github.workspace }}/_build/install/default/xapi/sdk/go/src + working-directory: ${{ github.workspace }}/_build/install/default/share/go/src args: --config=${{ github.workspace }}/.golangci.yml - name: Run CI for Go SDK shell: bash run: | cd ./ocaml/sdk-gen/component-test/ - cp -r ${{ github.workspace }}/_build/install/default/xapi/sdk/go/src jsonrpc-client/go/goSDK - bash run-tests.sh \ No newline at end of file + cp -r ${{ github.workspace 
}}/_build/install/default/share/go/src jsonrpc-client/go/goSDK + bash run-tests.sh diff --git a/.github/workflows/sdk-ci/action.yml b/.github/workflows/sdk-ci/action.yml index f20b59ee8d6..6781b6a8644 100644 --- a/.github/workflows/sdk-ci/action.yml +++ b/.github/workflows/sdk-ci/action.yml @@ -17,4 +17,4 @@ runs: - name: Run CI for Go SDK uses: ./.github/workflows/go-ci - # Run other tests here \ No newline at end of file + # Run other tests here diff --git a/.gitignore b/.gitignore index 27ed892007d..2c90d7261d3 100644 --- a/.gitignore +++ b/.gitignore @@ -21,9 +21,6 @@ python3/examples/XenAPI.egg-info/ python3/examples/build/ python3/examples/dist/ -# ignore file needed for building the SDK -ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml - # configure-generated files ocaml/xenopsd/scripts/vif ocaml/xenopsd/scripts/xen-backend.rules diff --git a/Makefile b/Makefile index 6c7a7f8cab0..3dd3947f2eb 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ include config.mk -XAPIDOC=_build/install/default/xapi/doc -XAPISDK=_build/install/default/xapi/sdk +XAPIDOC=_build/install/default/usr/share/xapi/doc +XAPISDK=_build/install/default/usr/share/xapi/sdk JOBS = $(shell getconf _NPROCESSORS_ONLN) PROFILE=release OPTMANDIR ?= $(OPTDIR)/man/man1/ @@ -12,6 +12,8 @@ OPTMANDIR ?= $(OPTDIR)/man/man1/ # this is typically used when we're not building from a git repo build: [ -z "${XAPI_VERSION}" ] || (sed -i '/(version.*)/d' dune-project && echo "(version ${XAPI_VERSION})" >> dune-project) +# if available use external file, otherwise use built-in, this allows building XAPI without being root + ! test -f $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml || cp $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote || dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote dune build @install -j $(JOBS) --profile=$(PROFILE) dune build @ocaml/xapi-storage/python/xapi/storage/api/v5/python --profile=$(PROFILE) @@ -96,33 +98,17 @@ doc: dune build --profile=$(PROFILE) -f @man sdk: - cp $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml - dune build --profile=$(PROFILE) -f @sdkgen - rm -rf $(XAPISDK) - mkdir -p $(XAPISDK)/c - mkdir -p $(XAPISDK)/csharp - mkdir -p $(XAPISDK)/java - mkdir -p $(XAPISDK)/powershell - mkdir -p $(XAPISDK)/python - mkdir -p $(XAPISDK)/go - cp -r _build/default/ocaml/sdk-gen/c/autogen/* $(XAPISDK)/c - cp -r _build/default/ocaml/sdk-gen/csharp/autogen/* $(XAPISDK)/csharp - cp -r _build/default/ocaml/sdk-gen/java/autogen/* $(XAPISDK)/java - cp -r _build/default/ocaml/sdk-gen/powershell/autogen/* $(XAPISDK)/powershell - cp -r _build/default/ocaml/sdk-gen/go/autogen/* $(XAPISDK)/go - cp python3/examples/XenAPI/XenAPI.py $(XAPISDK)/python - sh ocaml/sdk-gen/windows-line-endings.sh $(XAPISDK)/csharp - sh ocaml/sdk-gen/windows-line-endings.sh $(XAPISDK)/powershell + dune build --profile=$(PROFILE) @sdkgen xapi-sdk.install @ocaml/sdk-gen/install .PHONY: sdk-build-c sdk-build-c: sdk - cd _build/install/default/xapi/sdk/c && make clean && make -j $(JOBS) + cd _build/install/default/share/c && make clean && make -j $(JOBS) .PHONY: sdk-build-java sdk-build-java: sdk - cd _build/install/default/xapi/sdk/java && mvn -f xen-api/pom.xml -B clean package install -Drevision=0.0 + cd _build/install/default/share/java && mvn -f xen-api/pom.xml -B clean package install -Drevision=0.0 python: $(MAKE) -C python3/examples build @@ -260,9 +246,7 @@ install: 
build doc sdk doc-json cp -r $(XAPIDOC)/markdown $(DESTDIR)$(DOCDIR) cp $(XAPIDOC)/*.dot $(XAPIDOC)/doc-convert.sh $(DESTDIR)$(DOCDIR) # sdk - mkdir -p $(DESTDIR)$(SDKDIR) - cp -r $(XAPISDK)/* $(DESTDIR)$(SDKDIR) - find $(DESTDIR)$(SDKDIR) -type f -exec chmod 644 {} \; + dune install --destdir=$(DESTDIR) --datadir=$(SDKDIR) xapi-sdk find $(DESTDIR) -name '*.cmxs' -delete uninstall: diff --git a/dune b/dune index 2a094a073a9..ac7f4810205 100644 --- a/dune +++ b/dune @@ -17,3 +17,9 @@ ; Can still be used for dependencies, but dune won't scan these dirs ; for dune files (data_only_dirs doc scripts python3 .vscode) + +(install + (package xapi-sdk) + (section share_root) + (files (python3/examples/XenAPI/XenAPI.py as python/XenAPI.py)) +) diff --git a/dune-project b/dune-project index fa6e37a749b..fc627bc550d 100644 --- a/dune-project +++ b/dune-project @@ -2,6 +2,7 @@ (formatting (enabled_for ocaml)) (using menhir 2.0) +(using directory-targets 0.1) (cram enable) (implicit_transitive_deps false) diff --git a/ocaml/doc/README.md b/ocaml/doc/README.md index b30f65d6a2b..ec8cda0dcc9 100644 --- a/ocaml/doc/README.md +++ b/ocaml/doc/README.md @@ -1,11 +1,11 @@ # A note on generating locally the API reference Run `make doc` in the repo root. This will output the API reference in html and -markdown formats in `_build/install/default/xapi/doc`. +markdown formats in `_build/install/default/usr/share/xapi/doc`. Both html and markdown reference images which need to be generated as a separate step from the `.dot` files. This requires `graphviz` to be installed. To generate the images, run `sh doc-convert.sh` in -`_build/install/default/xapi/doc`. Now you can view the API reference by opening -`_build/install/default/xapi/doc/html/index.html` in your browser. +`_build/install/default/usr/share/xapi/doc`. Now you can view the API reference by opening +`_build/install/default/usr/share/xapi/doc/html/index.html` in your browser. diff --git a/ocaml/sdk-gen/c/dune b/ocaml/sdk-gen/c/dune index ca7f44dee18..adbea6905fa 100644 --- a/ocaml/sdk-gen/c/dune +++ b/ocaml/sdk-gen/c/dune @@ -12,11 +12,23 @@ (rule (alias generate) + (package xapi-sdk) + (targets (dir autogen-out)) (deps (:x gen_c_binding.exe) (source_tree templates) + (source_tree autogen) ) - (action (run %{x})) + (action (concurrent + (bash "cp -r autogen/ autogen-out/") + (run %{x}) + )) ) (data_only_dirs templates) + +(install + (package xapi-sdk) + (section share_root) + (dirs (autogen-out as c)) +) diff --git a/ocaml/sdk-gen/c/gen_c_binding.ml b/ocaml/sdk-gen/c/gen_c_binding.ml index 757046ac336..6c9be258967 100644 --- a/ocaml/sdk-gen/c/gen_c_binding.ml +++ b/ocaml/sdk-gen/c/gen_c_binding.ml @@ -16,7 +16,7 @@ module TypeSet = Set.Make (struct let compare = compare end) -let destdir = "autogen" +let destdir = "autogen-out" let templates_dir = "templates" diff --git a/ocaml/sdk-gen/component-test/README.md b/ocaml/sdk-gen/component-test/README.md index 8e68e3e8a6a..3aa563bc60b 100644 --- a/ocaml/sdk-gen/component-test/README.md +++ b/ocaml/sdk-gen/component-test/README.md @@ -51,7 +51,7 @@ jsonrpc-client is a client that imports the SDK and runs the functions, followin 5. To support the SDK component test, it recommended to move the SDK generated to a sub directory as a local module for import purposes, eg: ``` -cp -r ${{ github.workspace }}/_build/install/default/xapi/sdk/go/src jsonrpc-client/go/goSDK +cp -r ${{ github.workspace }}/_build/install/default/share/go/src jsonrpc-client/go/goSDK ``` then, import the local module. 
``` diff --git a/ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml b/ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml new file mode 100644 index 00000000000..725d14feb78 --- /dev/null +++ b/ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml @@ -0,0 +1,914 @@ + + + + + + SRInUse + The SR device is currently in use + 16 + + + VDIInUse + The VDI is currently in use + 24 + + + LockErr + The lock/unlock request failed + 37 + + + Unimplemented + The requested method is not supported/implemented + 38 + + + SRNotEmpty + The SR is not empty + 39 + + + + ConfigLUNMissing + The request is missing the LUNid parameter + 87 + + + ConfigSCSIid + The SCSIid parameter is missing or incorrect + 107 + + + + + ISODconfMissingLocation + 'Location' parameter must be specified in Device Configuration + 220 + + + ISOMustHaveISOExtension + ISO name must have .iso extension + 221 + + + ISOMountFailure + Could not mount the directory specified in Device Configuration + 222 + + + ISOUnmountFailure + Could not unmount the directory specified in Device Configuration + 223 + + + ISOSharenameFailure + Could not locate the ISO sharename on the target, or the access permissions may be incorrect. + 224 + + + ISOLocationStringError + Incorrect Location string format. String must be in the format SERVER:PATH for NFS targets, or \\SERVER\PATH for CIFS targets + 225 + + + ISOLocalPath + Invalid local path + 226 + + + ISOInvalidSMBversion + Given SMB version is not allowed. Choose either 1.0 or 3.0 + 227 + + + ISOInvalidXeMountOptions + Require "-o" along with xe-mount-iso-sr + 228 + + + + + InvalidArg + Invalid argument + 1 + + + BadCharacter + A bad character was detected in the dconf string + 2 + + + InvalidDev + No such device + 19 + + + InvalidSecret + No such secret. + 20 + + + + + SRScan + The SR scan failed + 40 + + + SRLog + The SR log operation failed + 41 + + + SRExists + The SR already exists + 42 + + + VDIExists + The VDI already exists + 43 + + + SRNoSpace + There is insufficient space + 44 + + + + VDIUnavailable + The VDI is not available + 46 + + + SRUnavailable + The SR is not available + 47 + + + SRUnknownType + Unknown repository type + 48 + + + SRBadXML + Malformed XML string + 49 + + + LVMCreate + Logical Volume creation error + 50 + + + LVMDelete + Logical Volume deletion error + 51 + + + LVMMount + Logical Volume mount/activate error + 52 + + + LVMUnMount + Logical Volume unmount/deactivate error + 53 + + + LVMWrite + Logical Volume write error + 54 + + + LVMPartCreate + Logical Volume partition creation error + 55 + + + LVMPartInUse + Logical Volume partition in use + 56 + + + LVMFilesystem + Logical Volume filesystem creation error + 57 + + + LVMMaster + Logical Volume request must come from master + 58 + + + LVMResize + Logical Volume resize failed + 59 + + + LVMSize + Logical Volume invalid size + 60 + + + FileSRCreate + File SR creation error + 61 + + + FileSRRmDir + File SR failed to remove directory + 62 + + + FileSRDelete + File SR deletion error + 63 + + + VDIRemove + Failed to remove VDI + 64 + + + VDILoad + Failed to load VDI + 65 + + + VDIType + Invalid VDI type + 66 + + + ISCSIDevice + ISCSI device failed to appear + 67 + + + ISCSILogin + ISCSI login failed - check access settings for the initiator on the storage, if CHAP is used verify CHAP credentials + 68 + + + ISCSILogout + ISCSI logout failed + 69 + + + ISCSIInitiator + Failed to set ISCSI initiator + 70 + + + ISCSIDaemon + Failed to start ISCSI daemon + 71 + + + NFSVersion + Required NFS server version unsupported + 72 + + + NFSMount + NFS mount error + 73 + + 
+ NFSUnMount + NFS unmount error + 74 + + + NFSAttached + NFS mount point already attached + 75 + + + NFSDelete + Failed to remove NFS mount point + 76 + + + NFSTarget + Unable to detect an NFS service on this target. + 108 + + + LVMGroupCreate + Logical Volume group creation failed + 77 + + + VDICreate + VDI Creation failed + 78 + + + VDISize + VDI Invalid size + 79 + + + VDIDelete + Failed to mark VDI hidden + 80 + + + VDIClone + Failed to clone VDI + 81 + + + VDISnapshot + Failed to snapshot VDI + 82 + + + ISCSIDiscovery + ISCSI discovery failed + 83 + + + ISCSIIQN + ISCSI target and received IQNs differ + 84 + + + ISCSIDetach + ISCSI detach failed + 85 + + + ISCSIQueryDaemon + Failed to query the iscsi daemon + 86 + + + + NFSCreate + NFS SR creation error + 88 + + + ConfigLUNIDMissing + The request is missing the LUNid parameter + 89 + + + ConfigDeviceMissing + The request is missing the device parameter + 90 + + + ConfigDeviceInvalid + The device is not a valid path + 91 + + + VolNotFound + The volume cannot be found + 92 + + + PVSfailed + pvs failed + 93 + + + ConfigLocationMissing + The request is missing the location parameter + 94 + + + ConfigTargetMissing + The request is missing the target parameter + 95 + + + ConfigTargetIQNMissing + The request is missing or has an incorrect target IQN parameter + 96 + + + ConfigISCSIIQNMissing + Unable to retrieve the host configuration ISCSI IQN parameter + 97 + + + ConfigLUNSerialMissing + The request is missing the LUN serial number + 98 + + + LVMOneLUN + Only 1 LUN may be used with shared LVM + 99 + + + LVMNoVolume + Cannot find volume + 100 + + + ConfigServerPathMissing + The request is missing the serverpath parameter + 101 + + + ConfigServerMissing + The request is missing the server parameter + 102 + + + ConfigServerPathBad + The serverpath argument is not valid + 103 + + + LVMRefCount + Unable to open the refcount file + 104 + + + Rootdev + Root system device, cannot be used for VM storage + 105 + + + NoRootDev + Root system device not found + 118 + + + InvalidIQN + The IQN provided is an invalid format + 106 + + + SnapshotChainTooLong + The snapshot chain is too long + 109 + + + VDIResize + VDI resize failed + 110 + + + SMBMount + SMB mount error + 111 + + + SMBUnMount + SMB unmount error + 112 + + + SMBAttached + SMB mount point already attached + 113 + + + SMBDelete + Failed to remove SMB mount point + 114 + + + ConfigParamsMissing + Not all required parameters specified. + 115 + + + SMBCreate + Failed to create SMB SR. + 116 + + + + + LVMRead + Logical Volume read error + 117 + + + + + APISession + Failed to initialize XMLRPC connection + 150 + + + APILocalhost + Failed to query Local Control Domain + 151 + + + APIPBDQuery + A Failure occurred querying the PBD entries + 152 + + + APIFailure + A Failure occurred accessing an API object + 153 + + + + + NAPPTarget + Netapp Target parameter missing in Dconf string + 120 + + + NAPPUsername + Netapp Username parameter missing in Dconf string + 121 + + + NAPPPassword + Netapp Password parameter missing in Dconf string + 122 + + + NAPPAggregate + Netapp Aggregate parameter missing in Dconf string + 123 + + + NAPPTargetFailed + Failed to connect to Netapp target + 124 + + + NAPPAuthFailed + Authentication credentials incorrect + 125 + + + NAPPInsufficientPriv + Auth credentials have insufficient access privileges + 126 + + + NAPPFVolNum + Max number of flexvols reached on target. Unable to allocate requested resource. 
+ 127 + + + NAPPSnapLimit + Max number of Snapshots reached on target Volume. Unable to create snapshot. + 128 + + + NAPPSnapNoMem + Insufficient space, unable to create snapshot. + 129 + + + NAPPUnsupportedVersion + Netapp Target version unsupported + 130 + + + NAPPTargetIQN + Unable to retrieve target IQN + 131 + + + NAPPNoISCSIService + ISCSI service not running on the Netapp target. + 132 + + + NAPPAsisLicense + Failed to enable A-SIS for the SR. Requires valid license on the filer. + 133 + + + NAPPAsisError + The filer will not support A-SIS on this aggregate. The license is valid however on some filers A-SIS is limited to smaller aggregates, e.g. FAS3020 max supported aggregate is 1TB. See filer support documentation for details on your model. You must either disable A-SIS support, or re-configure your aggregate to the max supported size. + 134 + + + NAPPExclActivate + Failed to acquire an exclusive lock on the LUN. + 135 + + + DNSError + Incorrect DNS name, unable to resolve. + 140 + + + ISCSITarget + Unable to connect to ISCSI service on target + 141 + + + ISCSIPort + Incorrect value for iSCSI port, must be a number between 1 and 65535 + 142 + + + + BadRequest + Failed to parse the request + 143 + + + VDIMissing + VDI could not be found + 144 + + + + EQLTarget + Equallogic Target parameter missing in Dconf string + 160 + + + EQLUsername + Equallogic Username parameter missing in Dconf string + 161 + + + EQLPassword + Equallogic Password parameter missing in Dconf string + 162 + + + EQLStoragePool + Equallogic StoragePool parameter missing in Dconf string + 163 + + + EQLConnectfail + Failed to connect to Equallogic Array; maximum SSH CLI sessions reached + 164 + + + EQLInvalidSnapReserve + Invalid snap-reserver-percentage value, must be an integer indicating the amount of space, as a percentage of the VDI size, to reserve for snapshots. + 165 + + + EQLInvalidSnapDepletionKey + Invalid snap-depletion value, must be one of 'delete-oldest' or 'volume-offline' + 166 + + + EQLVolOutofSpace + Volume out of space, probably due to insufficient snapshot reserve allocation. + 167 + + + EQLSnapshotOfSnapshot + Cannot create Snapshot of a Snapshot VDI, operation unsupported + 168 + + + EQLPermDenied + Failed to connect to Equallogic Array, Permission denied;username/password invalid + 169 + + + EQLUnsupportedVersion + Equallogic Target version unsupported + 170 + + + EQLTargetPort + Unable to logon to Array. Check IP settings. + 171 + + + EQLInvalidStoragePool + Equallogic StoragePool parameter specified in Dconf string is Invalid + 172 + + + EQLInvalidTargetIP + Equallogic Target parameter specified in Dconf string is Invalid, please specify the correct Group IPaddress + 173 + + + EQLInvalidSNMPResp + Invalid SNMP response received for a CLI command + 174 + + + EQLInvalidVolMetaData + Volume metadata stored in the 'Description' field is invalid, this field contains encoded data and is not user editable + 175 + + + EQLInvalidEOFRecv + Invalid EOF response received for a CLI command + 176 + + + LVMProvisionAttach + Volume Group out of space. The SR is over-provisioned, and out of space. Unable to grow the underlying volume to accommodate the virtual size of the disk. + 180 + + + MetadataError + Error in Metadata volume operation for SR. 
+ 181 + + + ISCSIDelete + ISCSI delete failed + 182 + + + + EIO + General IO error + 200 + + + EGAIN + Currently unavailable, try again + 201 + + + SMGeneral + General backend error + 202 + + + FistPoint + An active FIST point was reached that causes the process to exit abnormally + 203 + + + LeafGCSkip + Gave up on leaf coalesce after leaf grew bigger than before snapshot taken + 204 + + + VBDListNotStable + LVHDRT: found a non-stable VBD + 205 + + + + XMLParse + Unable to parse XML + 413 + + + MultipathdCommsFailure + Failed to communicate with the multipath daemon + 430 + + + MultipathGenericFailure + Multipath generic failure + 431 + + + MultipathMapperPathMissing + Device Mapper path missing + 432 + + + + MultipathDeviceNotAppeared + Device Mapper path not appeared yet + 433 + + + MultipathDeviceNoScsiid + Device Mapper path no SCSI ID supplied + 434 + + + TapdiskAlreadyRunning + The tapdisk is already running + 445 + + + + CIFSExtendedCharsNotSupported + XenServer does not support extended characters in CIFS paths, usernames, passwords, and file names. + 446 + + + + IllegalXMLChar + Illegal XML character. + 447 + + + + UnsupportedKernel + Unsupported kernel: neither 2.6 nor 3.x. + 448 + + + OCFSOneLUN + Only 1 LUN may be used with shared OCFS + 449 + + + OCFSMount + OCFS mount error + 450 + + + OCFSUnMount + OCFS unmount error + 451 + + + OCFSFilesystem + OCFS filesystem creation error + 452 + + + + TapdiskFailed + tapdisk experienced an error + 453 + + + + NoSMBLicense + SMB SR is not licensed on this host + 454 + + + + VGReg + VG Registration failure + 455 + + + + TapdiskDriveEmpty + Unable to attach empty optical drive to VM. + 456 + + + + CBTActivateFailed + Unable to activate changed block tracking. + 457 + + + + CBTDeactivateFailed + Unable to deactivate changed block tracking. + 458 + + + + CBTMetadataInconsistent + Changed block tracking log is in an inconsistent state. + 459 + + + + CBTChangedBlocksError + Failed to calculate changed blocks for given VDIs. + 460 + + + + SharedFileSystemNoWrite + The file system for SR cannot be written to. + 461 + + + + GenericException + SM has thrown a generic python exception + 1200 + + + + diff --git a/ocaml/sdk-gen/csharp/autogen/dune b/ocaml/sdk-gen/csharp/autogen/dune index 738e07d974f..2a9744e4ae6 100644 --- a/ocaml/sdk-gen/csharp/autogen/dune +++ b/ocaml/sdk-gen/csharp/autogen/dune @@ -7,12 +7,5 @@ (action (copy %{deps} %{targets})) ) -(alias - (name generate) - (deps - LICENSE - (source_tree .) 
- ) -) - (data_only_dirs src) + diff --git a/ocaml/sdk-gen/csharp/dune b/ocaml/sdk-gen/csharp/dune index df6856bfc22..07e2fd42950 100644 --- a/ocaml/sdk-gen/csharp/dune +++ b/ocaml/sdk-gen/csharp/dune @@ -28,22 +28,33 @@ (rule (alias generate) + (targets (dir autogen-out)) (deps (:x gen_csharp_binding.exe) (source_tree templates) - ) - (action (run %{x})) -) - -(rule - (alias generate) - (deps - (:x friendly_error_names.exe) + (:sh ../windows-line-endings.sh) + (source_tree autogen) + (:x2 friendly_error_names.exe) FriendlyErrorNames.resx (:y XE_SR_ERRORCODES.xml) (source_tree templates) ) - (action (run %{x} -s %{y})) + (action + (progn + (concurrent + (bash "cp -r autogen/ autogen-out/") + (run %{x}) + (run %{x2} -s %{y}) + ) + (bash "rm autogen-out/.gitignore") + (bash "%{sh} autogen-out/") + )) ) (data_only_dirs templates) + +(install + (package xapi-sdk) + (section share_root) + (dirs (autogen-out as csharp)) +) diff --git a/ocaml/sdk-gen/csharp/friendly_error_names.ml b/ocaml/sdk-gen/csharp/friendly_error_names.ml index 2cb6a3f9de9..74e4a80995d 100644 --- a/ocaml/sdk-gen/csharp/friendly_error_names.ml +++ b/ocaml/sdk-gen/csharp/friendly_error_names.ml @@ -20,7 +20,7 @@ let _ = let sr_xml = !sr_xml' -let destdir = "autogen/src" +let destdir = "autogen-out/src" let templdir = "templates" diff --git a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml index edaa3a7c7f9..aa65b99b4c3 100644 --- a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml +++ b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml @@ -25,7 +25,7 @@ let get_deprecated_attribute message = | Some versionString -> "[Deprecated(\"" ^ get_release_branding versionString ^ "\")]" -let destdir = "autogen/src" +let destdir = "autogen-out/src" let templdir = "templates" diff --git a/ocaml/sdk-gen/dune b/ocaml/sdk-gen/dune index 4d10d554e4e..76bdaaab2ca 100644 --- a/ocaml/sdk-gen/dune +++ b/ocaml/sdk-gen/dune @@ -2,6 +2,7 @@ (alias (name sdkgen) + (package xapi-sdk) (deps c/gen_c_binding.exe csharp/gen_csharp_binding.exe diff --git a/ocaml/sdk-gen/go/dune b/ocaml/sdk-gen/go/dune index de55ec5cee8..a126ee856bd 100644 --- a/ocaml/sdk-gen/go/dune +++ b/ocaml/sdk-gen/go/dune @@ -27,11 +27,18 @@ (rule (alias generate) + (targets (dir autogen-out)) (deps (:x gen_go_binding.exe) (source_tree templates) + (source_tree autogen) + ) + (action + (concurrent + (bash "cp -r autogen/ autogen-out/") + (run %{x} --destdir autogen-out) + ) ) - (action (run %{x} --destdir autogen)) ) (test @@ -46,3 +53,8 @@ ) (data_only_dirs test_data templates) +(install + (package xapi-sdk) + (section share_root) + (dirs (autogen-out as go)) +) diff --git a/ocaml/sdk-gen/java/autogen/dune b/ocaml/sdk-gen/java/autogen/dune index 0d4efe16d03..da324f0b9d0 100644 --- a/ocaml/sdk-gen/java/autogen/dune +++ b/ocaml/sdk-gen/java/autogen/dune @@ -6,3 +6,4 @@ ) (data_only_dirs xen-api) + diff --git a/ocaml/sdk-gen/java/dune b/ocaml/sdk-gen/java/dune index e83e9fea097..07167296b84 100644 --- a/ocaml/sdk-gen/java/dune +++ b/ocaml/sdk-gen/java/dune @@ -22,12 +22,23 @@ (rule (alias generate) + (targets (dir autogen-out)) (deps LICENSE (:x main.exe) (source_tree templates) + (source_tree autogen) ) - (action (run %{x})) + (action (concurrent + (bash "cp -r autogen/ autogen-out/") + (run %{x}) + )) ) (data_only_dirs templates) + +(install + (package xapi-sdk) + (section share_root) + (dirs (autogen-out as java)) +) diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index ea70c20673c..b025e434964 100644 --- a/ocaml/sdk-gen/java/main.ml +++ 
b/ocaml/sdk-gen/java/main.ml @@ -292,7 +292,7 @@ let generate_snapshot_hack = ) ) ^ {| - default: + default: throw new RuntimeException("Internal error in auto-generated code whilst unmarshalling event snapshot"); } record.snapshot = b;|} @@ -811,13 +811,13 @@ let populate_class cls templdir class_dir = let _ = let templdir = "templates" in - let class_dir = "autogen/xen-api/src/main/java/com/xensource/xenapi" in + let class_dir = "autogen-out/xen-api/src/main/java/com/xensource/xenapi" in populate_releases templdir class_dir ; List.iter (fun cls -> populate_class cls templdir class_dir) classes ; populate_types types templdir class_dir ; let uncommented_license = string_of_file "LICENSE" in - let filename = "autogen/xen-api/src/main/resources/LICENSE" in + let filename = "autogen-out/xen-api/src/main/resources/LICENSE" in Xapi_stdext_unix.Unixext.mkdir_rec (Filename.dirname filename) 0o755 ; let class_license = open_out filename in output_string class_license uncommented_license diff --git a/ocaml/sdk-gen/powershell/autogen/dune b/ocaml/sdk-gen/powershell/autogen/dune index 61e1f86a0a4..c4c2a5f8633 100644 --- a/ocaml/sdk-gen/powershell/autogen/dune +++ b/ocaml/sdk-gen/powershell/autogen/dune @@ -15,3 +15,8 @@ ) (data_only_dirs src) +(install + (package xapi-sdk) + (section doc) + (files (glob_files_rec (autogen/* with_prefix powershell))) +) diff --git a/ocaml/sdk-gen/powershell/dune b/ocaml/sdk-gen/powershell/dune index 826885af543..7eb4d3e56d6 100644 --- a/ocaml/sdk-gen/powershell/dune +++ b/ocaml/sdk-gen/powershell/dune @@ -12,11 +12,27 @@ (rule (alias generate) + (targets (dir autogen-out)) (deps (:x gen_powershell_binding.exe) (source_tree templates) + (:sh ../windows-line-endings.sh) + (source_tree autogen) ) - (action (run %{x})) + (action + (progn + (concurrent + (bash "cp -r autogen/ autogen-out/") + (run %{x}) + ) + (bash "rm autogen-out/.gitignore") + (bash "%{sh} autogen-out/") + )) ) (data_only_dirs templates) +(install + (package xapi-sdk) + (section share_root) + (dirs (autogen-out as powershell)) +) diff --git a/ocaml/sdk-gen/powershell/gen_powershell_binding.ml b/ocaml/sdk-gen/powershell/gen_powershell_binding.ml index 7fe02d07003..f7184672397 100644 --- a/ocaml/sdk-gen/powershell/gen_powershell_binding.ml +++ b/ocaml/sdk-gen/powershell/gen_powershell_binding.ml @@ -17,7 +17,7 @@ module TypeSet = Set.Make (struct let compare = compare end) -let destdir = "autogen/src" +let destdir = "autogen-out/src" let templdir = "templates" diff --git a/ocaml/sdk-gen/windows-line-endings.sh b/ocaml/sdk-gen/windows-line-endings.sh old mode 100644 new mode 100755 From 2c9253dfd40c1751fa0cf8d2f70c7154f987d80b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sat, 14 Sep 2024 10:12:57 +0100 Subject: [PATCH 017/141] CP-51479: [maintenance]: make xapi-xenopsd, and rrd-plugins.lib internal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid the .spec build complaining about newly installed files. These aren't used outside of XAPI anyway. Remove test on xenopsd.cmxs, which is no longer built (there is still a test on the simulator and main XAPI). 
Signed-off-by: Edwin Török --- dune-project | 7 +-- ocaml/tests/dune | 2 +- ocaml/xapi/dune | 2 +- ocaml/xcp-rrdd/bin/read-blktap-stats/dune | 2 +- ocaml/xcp-rrdd/bin/rrdd/dune | 4 +- ocaml/xcp-rrdd/bin/rrdp-dcmi/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-iostat/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-squeezed/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-xenpm/dune | 2 +- ocaml/xcp-rrdd/lib/blktap/lib/dune | 1 - ocaml/xcp-rrdd/lib/rrdd/dune | 1 - ocaml/xenopsd/c_stubs/dune | 6 +-- ocaml/xenopsd/dbgring/dune | 2 +- ocaml/xenopsd/lib/dune | 5 +- ocaml/xenopsd/simulator/dune | 2 +- ocaml/xenopsd/suspend_image_viewer/dune | 2 +- ocaml/xenopsd/test/dune | 13 +---- ocaml/xenopsd/xc/dune | 12 ++--- xapi-xenopsd-simulator.opam | 18 ++++++- xapi-xenopsd-simulator.opam.template | 18 ++++++- xapi-xenopsd-xc.opam | 6 ++- xapi-xenopsd-xc.opam.template | 6 ++- xapi-xenopsd.opam | 58 ----------------------- xapi-xenopsd.opam.template | 56 ---------------------- xapi.opam | 3 +- 25 files changed, 72 insertions(+), 162 deletions(-) delete mode 100644 xapi-xenopsd.opam delete mode 100644 xapi-xenopsd.opam.template diff --git a/dune-project b/dune-project index fc627bc550d..45875bb4565 100644 --- a/dune-project +++ b/dune-project @@ -105,10 +105,6 @@ (name xapi-xenopsd-cli) ) -(package - (name xapi-xenopsd) -) - (package (name xapi-types) ) @@ -348,6 +344,7 @@ domain-name (ezxenstore (= :version)) fmt + fd-send-recv hex (http-lib (and :with-test (= :version))) ; the public library is only used for testing integers @@ -384,6 +381,7 @@ tar-unix uri (uuid (= :version)) + uutf uuidm x509 xapi-backtrace @@ -404,7 +402,6 @@ (xapi-test-utils :with-test) (xapi-tracing (= :version)) (xapi-types (= :version)) - (xapi-xenopsd (= :version)) xenctrl ; for quicktest xenstore_transport xmlm diff --git a/ocaml/tests/dune b/ocaml/tests/dune index 9a08b1ea6d2..818d9288e70 100644 --- a/ocaml/tests/dune +++ b/ocaml/tests/dune @@ -50,7 +50,7 @@ xapi-tracing xapi-types xapi-stdext-pervasives - xapi-xenopsd + xapi_xenopsd xml-light2 ) (deps diff --git a/ocaml/xapi/dune b/ocaml/xapi/dune index e83dc8cd784..acf3cb5ae66 100644 --- a/ocaml/xapi/dune +++ b/ocaml/xapi/dune @@ -198,7 +198,7 @@ xapi-tracing xapi-tracing-export xapi_version - xapi-xenopsd + xapi_xenopsd xenstore_transport.unix xml-light2 xmlm diff --git a/ocaml/xcp-rrdd/bin/read-blktap-stats/dune b/ocaml/xcp-rrdd/bin/read-blktap-stats/dune index 9c6e2315d6f..72861b33506 100644 --- a/ocaml/xcp-rrdd/bin/read-blktap-stats/dune +++ b/ocaml/xcp-rrdd/bin/read-blktap-stats/dune @@ -5,7 +5,7 @@ (public_name xcp-rrdd-read-blktap-stats) (libraries cstruct - rrdd-plugins.libs + rrdd_plugins_libs unix ) ) diff --git a/ocaml/xcp-rrdd/bin/rrdd/dune b/ocaml/xcp-rrdd/bin/rrdd/dune index e01e010a77f..9b0dbbbab29 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/dune +++ b/ocaml/xcp-rrdd/bin/rrdd/dune @@ -27,7 +27,7 @@ xapi-log xapi-rrd xapi-rrd.unix - xapi-rrdd + rrdd_libs xapi-stdext-threads xapi-stdext-unix xmlm @@ -64,7 +64,7 @@ xapi-idl.rrd xapi-log xapi-rrd - xapi-rrdd + rrdd_libs xapi-stdext-pervasives xapi-stdext-threads xapi-stdext-unix diff --git a/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune b/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune index 6e422954c79..a6e092c3843 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune @@ -6,7 +6,7 @@ (libraries rrdd-plugin - rrdd-plugins.libs + rrdd_plugins_libs xapi-idl.rrd xapi-log xapi-rrd diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune index 7933a9a3fdc..1bf3c8ed4f0 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune +++ 
b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune @@ -12,7 +12,7 @@ mtime mtime.clock.os rrdd-plugin - rrdd-plugins.libs + rrdd_plugins_libs str stringext threads.posix diff --git a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune index 955b2bdecb9..acad6c3dfe9 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune @@ -6,7 +6,7 @@ (libraries rrdd-plugin - rrdd-plugins.libs + rrdd_plugins_libs xapi-stdext-std ezxenstore ezxenstore.watch diff --git a/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune b/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune index f28b84ef511..86340e6796e 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune @@ -6,7 +6,7 @@ (libraries rrdd-plugin - rrdd-plugins.libs + rrdd_plugins_libs str xapi-idl.rrd xapi-log diff --git a/ocaml/xcp-rrdd/lib/blktap/lib/dune b/ocaml/xcp-rrdd/lib/blktap/lib/dune index a96846c1fc8..bc79ab629d0 100644 --- a/ocaml/xcp-rrdd/lib/blktap/lib/dune +++ b/ocaml/xcp-rrdd/lib/blktap/lib/dune @@ -1,6 +1,5 @@ (library (name rrdd_plugins_libs) - (public_name rrdd-plugins.libs) (wrapped false) (preprocess (pps ppx_cstruct)) (libraries diff --git a/ocaml/xcp-rrdd/lib/rrdd/dune b/ocaml/xcp-rrdd/lib/rrdd/dune index 72e87db40bd..dd63ed88761 100644 --- a/ocaml/xcp-rrdd/lib/rrdd/dune +++ b/ocaml/xcp-rrdd/lib/rrdd/dune @@ -1,6 +1,5 @@ (library (name rrdd_libs) - (public_name xapi-rrdd) (modules constants stats) (flags (:standard -bin-annot)) (libraries diff --git a/ocaml/xenopsd/c_stubs/dune b/ocaml/xenopsd/c_stubs/dune index 7b2de7bf421..f22b2ea896c 100644 --- a/ocaml/xenopsd/c_stubs/dune +++ b/ocaml/xenopsd/c_stubs/dune @@ -1,6 +1,5 @@ (library - (name c_stubs) - (public_name xapi-xenopsd.c_stubs) + (name xapi_xenopsd_c_stubs) (wrapped false) (foreign_stubs (language c) @@ -9,8 +8,7 @@ ) (library - (name xc_stubs) - (public_name xapi-xenopsd-xc.c_stubs) + (name xapi_xenopsd_xc_c_stubs) (wrapped false) (libraries xenctrl) (foreign_stubs diff --git a/ocaml/xenopsd/dbgring/dune b/ocaml/xenopsd/dbgring/dune index 3d95198039f..c3d29f9e5be 100644 --- a/ocaml/xenopsd/dbgring/dune +++ b/ocaml/xenopsd/dbgring/dune @@ -4,7 +4,7 @@ (package xapi-xenopsd-xc) (libraries - xapi-xenopsd + xapi_xenopsd xenctrl xenmmap xenstore diff --git a/ocaml/xenopsd/lib/dune b/ocaml/xenopsd/lib/dune index 85377322942..2810eb88ef3 100644 --- a/ocaml/xenopsd/lib/dune +++ b/ocaml/xenopsd/lib/dune @@ -1,10 +1,9 @@ (library - (name xenopsd) - (public_name xapi-xenopsd) + (name xapi_xenopsd) (wrapped false) (libraries astring - c_stubs + xapi_xenopsd_c_stubs cohttp cohttp_posix fd-send-recv diff --git a/ocaml/xenopsd/simulator/dune b/ocaml/xenopsd/simulator/dune index 740b6d9b9e0..9274834e964 100644 --- a/ocaml/xenopsd/simulator/dune +++ b/ocaml/xenopsd/simulator/dune @@ -5,7 +5,7 @@ (libraries xapi-idl.xen.interface - xapi-xenopsd + xapi_xenopsd ) ) diff --git a/ocaml/xenopsd/suspend_image_viewer/dune b/ocaml/xenopsd/suspend_image_viewer/dune index afe650c6a6e..47b8ced2a92 100644 --- a/ocaml/xenopsd/suspend_image_viewer/dune +++ b/ocaml/xenopsd/suspend_image_viewer/dune @@ -11,6 +11,6 @@ xapi-idl xapi-log xapi-stdext-unix - xapi-xenopsd + xapi_xenopsd ) ) diff --git a/ocaml/xenopsd/test/dune b/ocaml/xenopsd/test/dune index a71ad643db9..2d3d34cc709 100644 --- a/ocaml/xenopsd/test/dune +++ b/ocaml/xenopsd/test/dune @@ -1,7 +1,7 @@ (test (name test) (modes exe) - (package xapi-xenopsd) + (package xapi-xenopsd-xc) (libraries alcotest cpuid @@ -16,7 +16,7 @@ xapi-log xapi-stdext-pervasives xapi-test-utils - xapi-xenopsd + 
xapi_xenopsd xenstore_transport.unix ) (preprocess @@ -24,15 +24,6 @@ ) ) -(rule - (alias runtest) - (package xapi-xenopsd) - (deps - (:x ../lib/xenopsd.cmxs) - ) - (action (run ./check-no-xenctrl.sh %{x})) -) - (rule (alias runtest) (package xapi-xenopsd-simulator) diff --git a/ocaml/xenopsd/xc/dune b/ocaml/xenopsd/xc/dune index b841da23fbc..06ba1a676e5 100644 --- a/ocaml/xenopsd/xc/dune +++ b/ocaml/xenopsd/xc/dune @@ -51,9 +51,9 @@ xapi-stdext-std xapi-stdext-threads xapi-stdext-unix - xapi-xenopsd - xapi-xenopsd.c_stubs - xapi-xenopsd-xc.c_stubs + xapi_xenopsd + xapi_xenopsd_c_stubs + xapi_xenopsd_xc_c_stubs xenctrl xenstore xenstore_transport.unix @@ -81,7 +81,7 @@ xapi-idl.xen.interface xapi-inventory xapi-stdext-unix - xapi-xenopsd + xapi_xenopsd xenctrl xenstore_transport.unix xenopsd_xc @@ -115,7 +115,7 @@ xapi-stdext-date xapi-stdext-unix - xapi-xenopsd + xapi_xenopsd xenctrl ) ) @@ -141,7 +141,7 @@ ezxenstore.core threads.posix xapi-idl.xen.interface - xapi-xenopsd + xapi_xenopsd xenctrl xenopsd_xc xenstore_transport.unix diff --git a/xapi-xenopsd-simulator.opam b/xapi-xenopsd-simulator.opam index 45cffcfc82a..16017fb218a 100644 --- a/xapi-xenopsd-simulator.opam +++ b/xapi-xenopsd-simulator.opam @@ -15,7 +15,23 @@ depends: [ "ocaml" "dune" {>= "3.15"} "base-unix" - "xapi-xenopsd" + "astring" + "cohttp" + "fd-send-recv" + "fmt" + "ppx_deriving_rpc" + "ppx_sexp_conv" + "re" + "result" + "rpclib" + "rresult" + "sexplib" + "sexplib0" + "uri" + "uuidm" + "uutf" + "xapi-backtrace" + "xmlm" ] synopsis: "Simulation backend allowing testing of the higher-level xenops logic" diff --git a/xapi-xenopsd-simulator.opam.template b/xapi-xenopsd-simulator.opam.template index b23fb7ea5fa..a11d9c74c19 100644 --- a/xapi-xenopsd-simulator.opam.template +++ b/xapi-xenopsd-simulator.opam.template @@ -13,7 +13,23 @@ depends: [ "ocaml" "dune" {>= "3.15"} "base-unix" - "xapi-xenopsd" + "astring" + "cohttp" + "fd-send-recv" + "fmt" + "ppx_deriving_rpc" + "ppx_sexp_conv" + "re" + "result" + "rpclib" + "rresult" + "sexplib" + "sexplib0" + "uri" + "uuidm" + "uutf" + "xapi-backtrace" + "xmlm" ] synopsis: "Simulation backend allowing testing of the higher-level xenops logic" diff --git a/xapi-xenopsd-xc.opam b/xapi-xenopsd-xc.opam index 53eb84adf96..1ebd0e0073c 100644 --- a/xapi-xenopsd-xc.opam +++ b/xapi-xenopsd-xc.opam @@ -15,12 +15,14 @@ build: [ depends: [ "ocaml" "dune" {>= "3.15"} + "alcotest" {with-test} "astring" "base-threads" "base-unix" "base64" "bos" "cmdliner" + "cohttp" "conf-xen" "ezxenstore" "fd-send-recv" @@ -36,6 +38,7 @@ depends: [ "result" "rpclib" "rresult" + "sexplib" "sexplib0" "uri" "uuid" @@ -49,11 +52,12 @@ depends: [ "xapi-stdext-std" "xapi-stdext-threads" "xapi-stdext-unix" - "xapi-xenopsd" + "xapi-test-utils" {with-test} "xenctrl" "xenstore" "xenstore_transport" "xenmmap" + "xmlm" ] synopsis: "A xenops plugin which knows how to use xenstore, xenctrl and xenguest to manage" diff --git a/xapi-xenopsd-xc.opam.template b/xapi-xenopsd-xc.opam.template index ef97615d427..e0dd497cde1 100644 --- a/xapi-xenopsd-xc.opam.template +++ b/xapi-xenopsd-xc.opam.template @@ -13,12 +13,14 @@ build: [ depends: [ "ocaml" "dune" {>= "3.15"} + "alcotest" {with-test} "astring" "base-threads" "base-unix" "base64" "bos" "cmdliner" + "cohttp" "conf-xen" "ezxenstore" "fd-send-recv" @@ -34,6 +36,7 @@ depends: [ "result" "rpclib" "rresult" + "sexplib" "sexplib0" "uri" "uuid" @@ -47,11 +50,12 @@ depends: [ "xapi-stdext-std" "xapi-stdext-threads" "xapi-stdext-unix" - "xapi-xenopsd" + "xapi-test-utils" {with-test} 
"xenctrl" "xenstore" "xenstore_transport" "xenmmap" + "xmlm" ] synopsis: "A xenops plugin which knows how to use xenstore, xenctrl and xenguest to manage" diff --git a/xapi-xenopsd.opam b/xapi-xenopsd.opam deleted file mode 100644 index f255a13cc30..00000000000 --- a/xapi-xenopsd.opam +++ /dev/null @@ -1,58 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -opam-version: "2.0" -name: "xapi-xenopsd" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -build: [ - ["./configure"] - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "base-threads" - "alcotest" {with-test} - "astring" - "cohttp" - "fd-send-recv" - "fmt" { >= "0.8.8" } - "forkexec" - "ppx_deriving_rpc" - "ppx_sexp_conv" - "re" - "result" - "rpclib" - "rresult" - "sexplib" - "sexplib0" - "uri" - "uuid" - "uuidm" - "uutf" - "xapi-backtrace" - "xapi-idl" - "xapi-stdext-date" - "xapi-stdext-pervasives" - "xapi-stdext-threads" - "xapi-stdext-unix" - "xapi-test-utils" {with-test} - "xapi-tracing" - "xapi-tracing-export" - "xenstore_transport" {with-test} - "xmlm" - "zstd" -] -synopsis: "A single-host domain/VM manager for the Xen hypervisor" -description: """ -The xenopsd daemon allows a set of VMs on a single host to be controlled -via a simple API. The API has been tailored to suit the needs of xapi, -which manages clusters of hosts running Xen, but it can also be used -standalone.""" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-xenopsd.opam.template b/xapi-xenopsd.opam.template deleted file mode 100644 index 93961549f71..00000000000 --- a/xapi-xenopsd.opam.template +++ /dev/null @@ -1,56 +0,0 @@ -opam-version: "2.0" -name: "xapi-xenopsd" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -build: [ - ["./configure"] - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "base-threads" - "alcotest" {with-test} - "astring" - "cohttp" - "fd-send-recv" - "fmt" { >= "0.8.8" } - "forkexec" - "ppx_deriving_rpc" - "ppx_sexp_conv" - "re" - "result" - "rpclib" - "rresult" - "sexplib" - "sexplib0" - "uri" - "uuid" - "uuidm" - "uutf" - "xapi-backtrace" - "xapi-idl" - "xapi-stdext-date" - "xapi-stdext-pervasives" - "xapi-stdext-threads" - "xapi-stdext-unix" - "xapi-test-utils" {with-test} - "xapi-tracing" - "xapi-tracing-export" - "xenstore_transport" {with-test} - "xmlm" - "zstd" -] -synopsis: "A single-host domain/VM manager for the Xen hypervisor" -description: """ -The xenopsd daemon allows a set of VMs on a single host to be controlled -via a simple API. 
The API has been tailored to suit the needs of xapi, -which manages clusters of hosts running Xen, but it can also be used -standalone.""" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi.opam b/xapi.opam index 4179ebc22ed..e533b5127eb 100644 --- a/xapi.opam +++ b/xapi.opam @@ -27,6 +27,7 @@ depends: [ "domain-name" "ezxenstore" {= version} "fmt" + "fd-send-recv" "hex" "http-lib" {with-test & = version} "integers" @@ -63,6 +64,7 @@ depends: [ "tar-unix" "uri" "uuid" {= version} + "uutf" "uuidm" "x509" "xapi-backtrace" @@ -83,7 +85,6 @@ depends: [ "xapi-test-utils" {with-test} "xapi-tracing" {= version} "xapi-types" {= version} - "xapi-xenopsd" {= version} "xenctrl" "xenstore_transport" "xmlm" From ffef021b8b34a31abb630df147bb3a1f927a1090 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 11 Sep 2024 16:13:42 +0100 Subject: [PATCH 018/141] CP-51479: [maintenance]: install some more files using dune, add @xapi-doc rule MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit More efficient than install.sh. Also enables merging opam packages more easily, as it makes it clearer which package installs files where. 'xapi' and 'xe' are special cases because they install to /opt/xensource, so add a separate line with a different --prefix for them. We need a new xapi-debug package to install to /opt/xensource/debug which is a non-FHS compliant path, and we override its bindir to be /opt/xensource/debug. `vhd-tool` is special too, since it installs some files to /usr/libexec/xapi, but xapi itself is in /opt/xensource and its libexec would be /opt/xensource/libexec. Dune doesn't support installing to arbitrary directories, there is a 'misc' rule for that, but it is deprecated so I wouldn't start using it now. These rules generate multiple files in a directory, use dune's directory target support. Previously we would've copied the files multiple times. This is quite difficult to review, I suggest reviewing the outcome of the PR: On b1238325aa622c3e07ff7429af5672d7914aacca (do not installs cmxs commit): ``` rm /tmp/inst/1 -rf; make build install DESTDIR=/tmp/inst/1 && (cd /tmp/inst/1 && find ! -type d -printf '%p %M\n' ) >|ref ``` On the final commit of this PR: ``` rm /tmp/inst/2 -rf; make build install DESTDIR=/tmp/inst/2 && (cd /tmp/inst/2 && find ! 
-type d -printf '%p %M\n' ) >|this && sort -u ref >|refs && sort -u this >|thiss && diff -wu refs thiss ``` Signed-off-by: Edwin Török --- Makefile | 124 +++--------------- dune-project | 61 ++++++++- ocaml/alerts/certificate/dune | 7 +- ocaml/cdrommon/dune | 7 +- ocaml/database/dune | 8 +- ocaml/doc/dune | 26 +++- ocaml/events/dune | 2 +- ocaml/gencert/dune | 8 +- .../management-api.md | 0 ocaml/idl/autogen/dune | 6 - ocaml/idl/dune | 20 ++- ocaml/license/dune | 7 +- ocaml/networkd/bin/dune | 19 ++- ocaml/perftest/dune | 2 +- ocaml/quicktest/dune | 3 +- ocaml/rrd2csv/src/dune | 3 +- ocaml/squeezed/src/dune | 8 +- ocaml/vhd-tool/cli/dune | 30 ++++- ocaml/vncproxy/dune | 2 +- ocaml/wsproxy/cli/dune | 4 + ocaml/xapi/dune | 5 +- ocaml/xcp-rrdd/bin/read-blktap-stats/dune | 5 +- ocaml/xcp-rrdd/bin/rrdd/dune | 7 +- ocaml/xcp-rrdd/bin/rrdp-dcmi/dune | 7 +- ocaml/xcp-rrdd/bin/rrdp-iostat/dune | 7 +- ocaml/xcp-rrdd/bin/rrdp-squeezed/dune | 7 +- ocaml/xcp-rrdd/bin/rrdp-xenpm/dune | 7 +- ocaml/xe-cli/dune | 1 - ocaml/xenopsd/cli/dune | 8 +- ocaml/xenopsd/dbgring/dune | 4 +- ocaml/xenopsd/dune | 1 - ocaml/xenopsd/pvs/dune | 10 +- ocaml/xenopsd/scripts/dune | 13 ++ ocaml/xenopsd/simulator/dune | 8 +- ocaml/xenopsd/suspend_image_viewer/dune | 2 +- ocaml/xenopsd/tools/dune | 8 +- ocaml/xenopsd/xc/dune | 8 +- ocaml/xenopsd/xc/fence/dune | 8 +- ocaml/xs-trace/dune | 13 +- rrd2csv.opam | 29 ---- rrd2csv.opam.template | 27 ---- xapi-debug.opam | 76 +++++++++++ xapi-squeezed.opam | 2 +- xapi-squeezed.opam.template | 2 +- xapi-xenopsd-xc.opam | 10 +- xapi-xenopsd-xc.opam.template | 10 +- xapi.opam | 2 +- 47 files changed, 386 insertions(+), 248 deletions(-) rename ocaml/idl/{autogen => autogen-static}/management-api.md (100%) delete mode 100644 ocaml/idl/autogen/dune delete mode 100644 ocaml/xenopsd/dune create mode 100644 ocaml/xenopsd/scripts/dune delete mode 100644 rrd2csv.opam delete mode 100644 rrd2csv.opam.template create mode 100644 xapi-debug.opam diff --git a/Makefile b/Makefile index 3dd3947f2eb..64397752bdf 100644 --- a/Makefile +++ b/Makefile @@ -80,22 +80,7 @@ schema: dune runtest ocaml/idl doc: -#html - dune build --profile=$(PROFILE) -f @ocaml/doc/jsapigen - mkdir -p $(XAPIDOC)/html - cp -r _build/default/ocaml/doc/api $(XAPIDOC)/html - cp _build/default/ocaml/doc/branding.js $(XAPIDOC)/html - cp ocaml/doc/*.js ocaml/doc/*.html ocaml/doc/*.css $(XAPIDOC)/html -#markdown - dune build --profile=$(PROFILE) -f @ocaml/idl/markdowngen - mkdir -p $(XAPIDOC)/markdown - cp -r _build/default/ocaml/idl/autogen/*.md $(XAPIDOC)/markdown - cp -r _build/default/ocaml/idl/autogen/*.yml $(XAPIDOC)/markdown - find ocaml/doc -name "*.md" -not -name "README.md" -exec cp {} $(XAPIDOC)/markdown/ \; -#other - cp ocaml/doc/*.dot ocaml/doc/doc-convert.sh $(XAPIDOC) -# Build manpages, networkd generated these - dune build --profile=$(PROFILE) -f @man + dune build --profile=$(PROFILE) @xapi-doc sdk: dune build --profile=$(PROFILE) @sdkgen xapi-sdk.install @ocaml/sdk-gen/install @@ -113,8 +98,7 @@ sdk-build-java: sdk python: $(MAKE) -C python3/examples build -doc-json: - dune exec --profile=$(PROFILE) -- ocaml/idl/json_backend/gen_json.exe -destdir $(XAPIDOC)/jekyll +doc-json: doc format: dune build @fmt --auto-promote @@ -134,120 +118,51 @@ install: build doc sdk doc-json mkdir -p $(DESTDIR)/etc mkdir -p $(DESTDIR)/etc/bash_completion.d # ocaml/xapi - make -C scripts install - make -C python3 install - cp -f _build/install/default/bin/xapi $(DESTDIR)$(OPTDIR)/bin/xapi + $(MAKE) -C scripts install + $(MAKE) -C python3 
install scripts/install.sh 755 ocaml/quicktest/quicktest $(DESTDIR)$(OPTDIR)/debug - cp -f _build/install/default/bin/quicktestbin $(DESTDIR)$(OPTDIR)/debug/quicktestbin - scripts/install.sh 644 _build/install/default/share/xapi/rbac_static.csv $(DESTDIR)$(OPTDIR)/debug -# ocaml/xsh - cp -f _build/install/default/bin/xsh $(DESTDIR)$(OPTDIR)/bin/xsh # ocaml/xe-cli - scripts/install.sh 755 _build/install/default/bin/xe $(DESTDIR)$(OPTDIR)/bin/xe ln -sf $(OPTDIR)/bin/xe $(DESTDIR)/usr/bin/xe scripts/install.sh 755 ocaml/xe-cli/bash-completion $(DESTDIR)/etc/bash_completion.d/xe -# ocaml/vncproxy - scripts/install.sh 755 _build/install/default/bin/vncproxy $(DESTDIR)$(OPTDIR)/debug/vncproxy -# ocaml/perftest - scripts/install.sh 755 _build/install/default/bin/perftest $(DESTDIR)$(OPTDIR)/debug/perftest -# ocaml/suspend-image-viewer - scripts/install.sh 755 _build/install/default/bin/suspend-image-viewer $(DESTDIR)$(OPTDIR)/debug/suspend-image-viewer -# ocaml/mpathalert - scripts/install.sh 755 _build/install/default/bin/mpathalert $(DESTDIR)$(OPTDIR)/bin/mpathalert -# ocaml/license - scripts/install.sh 755 _build/install/default/bin/daily-license-check $(DESTDIR)$(LIBEXECDIR)/daily-license-check -# ocaml/alerts/certificate - scripts/install.sh 755 _build/install/default/bin/alert-certificate-check $(DESTDIR)$(LIBEXECDIR)/alert-certificate-check -# ocaml/events - scripts/install.sh 755 _build/install/default/bin/event_listen $(DESTDIR)$(OPTDIR)/debug/event_listen -# ocaml/db_process - scripts/install.sh 755 _build/install/default/bin/xapi-db-process $(DESTDIR)$(OPTDIR)/bin/xapi-db-process -# ocaml/cdrommon - scripts/install.sh 755 _build/install/default/bin/cdrommon $(DESTDIR)$(LIBEXECDIR)/cdrommon -# ocaml/database - scripts/install.sh 755 _build/install/default/bin/block_device_io $(DESTDIR)$(LIBEXECDIR)/block_device_io -# ocaml/gencert - scripts/install.sh 755 _build/install/default/bin/gencert $(DESTDIR)$(LIBEXECDIR)/gencert -# ocaml/rrd2csv - scripts/install.sh 755 _build/install/default/bin/rrd2csv $(DESTDIR)$(OPTDIR)/bin/rrd2csv +# rrd2csv scripts/install.sh 644 ocaml/rrd2csv/man/rrd2csv.1.man $(DESTDIR)$(OPTMANDIR)/rrd2csv.1 -# ocaml/xs-trace - scripts/install.sh 755 _build/install/default/bin/xs-trace $(DESTDIR)/usr/bin/xs-trace -# xcp-rrdd - install -D _build/install/default/bin/xcp-rrdd $(DESTDIR)/usr/sbin/xcp-rrdd - install -D _build/install/default/bin/rrddump $(DESTDIR)/usr/bin/rrddump -# rrd-cli - install -D _build/install/default/bin/rrd-cli $(DESTDIR)/usr/bin/rrd-cli -# rrd-transport - install -D _build/install/default/bin/rrdreader $(DESTDIR)/usr/bin/rrdreader - install -D _build/install/default/bin/rrdwriter $(DESTDIR)/usr/bin/rrdwriter # rrdd-plugins - install -D -m 755 _build/install/default/bin/xcp-rrdd-iostat $(DESTDIR)$(LIBEXECDIR)/xcp-rrdd-plugins/xcp-rrdd-iostat - install -D -m 755 _build/install/default/bin/xcp-rrdd-squeezed $(DESTDIR)$(LIBEXECDIR)/xcp-rrdd-plugins/xcp-rrdd-squeezed - install -D -m 755 _build/install/default/bin/xcp-rrdd-xenpm $(DESTDIR)$(LIBEXECDIR)/xcp-rrdd-plugins/xcp-rrdd-xenpm - install -D -m 755 _build/install/default/bin/xcp-rrdd-dcmi $(DESTDIR)$(LIBEXECDIR)/xcp-rrdd-plugins/xcp-rrdd-dcmi install -D -m 644 ocaml/xcp-rrdd/bugtool-plugin/rrdd-plugins.xml $(DESTDIR)$(ETCXENDIR)/bugtool/xcp-rrdd-plugins.xml install -D -m 644 ocaml/xcp-rrdd/bugtool-plugin/rrdd-plugins/stuff.xml $(DESTDIR)$(ETCXENDIR)/bugtool/xcp-rrdd-plugins/stuff.xml install -D -m 755 ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins $(DESTDIR)/etc/sysconfig/xcp-rrdd-plugins 
install -D -m 644 ocaml/xcp-rrdd/bin/rrdp-scripts/logrotate-rrdd-plugins $(DESTDIR)/etc/logrotate.d/xcp-rrdd-plugins # vhd-tool - install -m 755 _build/install/default/bin/sparse_dd $(DESTDIR)/usr/libexec/xapi/sparse_dd - install -m 755 _build/install/default/bin/vhd-tool $(DESTDIR)/usr/bin/vhd-tool install -m 644 ocaml/vhd-tool/cli/sparse_dd.conf $(DESTDIR)/etc/sparse_dd.conf - install -m 755 _build/install/default/bin/get_vhd_vsize $(DESTDIR)/usr/libexec/xapi/get_vhd_vsize - install -m 755 ocaml/vhd-tool/scripts/get_nbd_extents.py $(DESTDIR)$(LIBEXECDIR)/get_nbd_extents.py - install -m 644 ocaml/vhd-tool/scripts/python_nbd_client.py $(DESTDIR)$(LIBEXECDIR)/python_nbd_client.py # xenopsd - install -D _build/install/default/bin/xenopsd-simulator $(DESTDIR)/$(SBINDIR)/xenopsd-simulator - install -D _build/install/default/man/man1/xenopsd-simulator.1.gz $(DESTDIR)/$(MANDIR)/man1/xenopsd-simulator.1.gz - install -D _build/install/default/bin/xenopsd-xc $(DESTDIR)/$(SBINDIR)/xenopsd-xc - install -D _build/install/default/bin/fence.bin $(DESTDIR)/$(LIBEXECDIR)/fence.bin - install -D _build/install/default/man/man1/xenopsd-xc.1.gz $(DESTDIR)/$(MANDIR)/man1/xenopsd-xc.1.gz - install -D _build/install/default/bin/set-domain-uuid $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/set-domain-uuid - install -D _build/install/default/bin/xenops-cli $(DESTDIR)/$(SBINDIR)/xenops-cli - install -D _build/install/default/man/man1/xenops-cli.1.gz $(DESTDIR)/$(MANDIR)/man1/xenops-cli.1.gz - install -D _build/install/default/bin/list_domains $(DESTDIR)/$(BINDIR)/list_domains - install -D ./ocaml/xenopsd/scripts/vif $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/vif - install -D ./ocaml/xenopsd/scripts/vif-real $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/vif-real - install -D ./ocaml/xenopsd/scripts/block $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/block install -D ./ocaml/xenopsd/scripts/xen-backend.rules $(DESTDIR)/$(ETCDIR)/udev/rules.d/xen-backend.rules - install -D ./ocaml/xenopsd/scripts/tap $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/tap - install -D ./ocaml/xenopsd/scripts/setup-vif-rules $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/setup-vif-rules - install -D ./_build/install/default/bin/pvs-proxy-ovs-setup $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/pvs-proxy-ovs-setup - (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -sf pvs-proxy-ovs-setup setup-pvs-proxy-rules) - install -D ./ocaml/xenopsd/scripts/common.py $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/common.py - install -D ./ocaml/xenopsd/scripts/igmp_query_injector.py $(DESTDIR)/$(XENOPSD_LIBEXECDIR)/igmp_query_injector.py install -D ./ocaml/xenopsd/scripts/qemu-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/qemu-wrapper install -D ./ocaml/xenopsd/scripts/swtpm-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/swtpm-wrapper install -D ./ocaml/xenopsd/scripts/pygrub-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/pygrub-wrapper DESTDIR=$(DESTDIR) SBINDIR=$(SBINDIR) QEMU_WRAPPER_DIR=$(QEMU_WRAPPER_DIR) XENOPSD_LIBEXECDIR=$(XENOPSD_LIBEXECDIR) ETCDIR=$(ETCDIR) ./ocaml/xenopsd/scripts/make-custom-xenopsd.conf -# squeezed - install -D _build/install/default/bin/squeezed $(DESTDIR)/$(SBINDIR)/squeezed -# xcp-networkd - install -m 755 _build/install/default/bin/xapi-networkd $(DESTDIR)/usr/sbin/xcp-networkd - install -m 755 _build/install/default/bin/networkd_db $(DESTDIR)/usr/bin/networkd_db - install -m 644 _build/default/ocaml/networkd/bin/xcp-networkd.1 $(DESTDIR)/usr/share/man/man1/xcp-networkd.1 -# wsproxy - install -m 755 _build/install/default/bin/wsproxy $(DESTDIR)$(LIBEXECDIR)/wsproxy # dune can install libraries and several other files into the right locations dune 
install --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ + --libexecdir=$(XENOPSD_LIBEXECDIR) \ xapi-client xapi-schema xapi-consts xapi-cli-protocol xapi-datamodel xapi-types \ xen-api-client xen-api-client-lwt xen-api-client-async rrdd-plugin rrd-transport \ gzip http-lib pciutil sexpr stunnel uuid xml-light2 zstd xapi-compression safe-resources \ message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli \ xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock \ + xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli xapi-xenopsd-xc\ xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck -# docs - mkdir -p $(DESTDIR)$(DOCDIR) - cp -r $(XAPIDOC)/jekyll $(DESTDIR)$(DOCDIR) - cp -r $(XAPIDOC)/html $(DESTDIR)$(DOCDIR) - cp -r $(XAPIDOC)/markdown $(DESTDIR)$(DOCDIR) - cp $(XAPIDOC)/*.dot $(XAPIDOC)/doc-convert.sh $(DESTDIR)$(DOCDIR) + dune install --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe rrdd-plugins + dune install --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug + dune install --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --libexecdir=/usr/libexec --mandir=$(MANDIR) vhd-tool + +# wsproxy + mv $(DESTDIR)/usr/bin/wsproxy $(DESTDIR)$(LIBEXECDIR)/wsproxy + (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -sf pvs-proxy-ovs-setup setup-pvs-proxy-rules) # sdk dune install --destdir=$(DESTDIR) --datadir=$(SDKDIR) xapi-sdk + chmod +x $(DESTDIR)$(DOCDIR)/doc-convert.sh + # backward compat with existing specfile, to be removed after it is updated find $(DESTDIR) -name '*.cmxs' -delete + for pkg in rrdd-plugins xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f; done; done; uninstall: # only removes what was installed with `dune install` @@ -258,8 +173,11 @@ uninstall: message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-log \ xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock \ + xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli\ xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck + dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) xapi xe rrdd-plugins + dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(XENOPSD_LIBEXECDIR) xapi-xenopsd-xc + dune uninstall 
--destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug compile_flags.txt: Makefile (ocamlc -config-var ocamlc_cflags;\ diff --git a/dune-project b/dune-project index 45875bb4565..8aca9cfd45b 100644 --- a/dune-project +++ b/dune-project @@ -322,12 +322,67 @@ (name xapi-cli-protocol) ) +(package + (name xapi-debug) + (synopsis "Debugging tools for XAPI") + (description "Tools installed into the non-standard /opt/xensource/debug location") + (depends + alcotest + angstrom + astring + base64 + cmdliner + cohttp + cstruct + ctypes + domain-name + fd-send-recv + fmt + hex + integers + ipaddr + logs + magic-mime + mirage-crypto + mirage-crypto-pk + mirage-crypto-rng + mtime + pci + polly + ppx_deriving + ppx_deriving_rpc + ppx_sexp_conv + psq + ptime + qcheck-alcotest + qcheck-core + re + result + rpclib + rresult + sexplib + sexplib0 + sha + tar + tar-unix + uri + uuidm + uutf + x509 + xapi-backtrace + xenctrl + xenstore_transport + xmlm + yojson + ) +) + (package (name xapi) (synopsis "The toolstack daemon which implements the XenAPI") (description "This daemon exposes the XenAPI and is used by clients such as 'xe' and 'XenCenter' to manage clusters of Xen-enabled hosts.") (depends - alcotest ; needed for the quicktest binary + (alcotest :with-test) angstrom astring base-threads @@ -530,10 +585,6 @@ This package provides an Lwt compatible interface to the library.") (name rrdd-plugins) ) -(package - (name rrd2csv) -) - (package (name rrd-transport) (synopsis "Shared-memory protocols for exposing system metrics") diff --git a/ocaml/alerts/certificate/dune b/ocaml/alerts/certificate/dune index 137b23d265e..4bd05643b0e 100644 --- a/ocaml/alerts/certificate/dune +++ b/ocaml/alerts/certificate/dune @@ -15,8 +15,6 @@ (executable (modes exe) (name certificate_check_main) - (public_name alert-certificate-check) - (package xapi) (modules certificate_check_main) (libraries certificate_check @@ -28,3 +26,8 @@ ) ) +(install + (files (certificate_check_main.exe as alert-certificate-check)) + (package xapi) + (section libexec_root) +) diff --git a/ocaml/cdrommon/dune b/ocaml/cdrommon/dune index bc57948a8d8..4d86c0ffafa 100644 --- a/ocaml/cdrommon/dune +++ b/ocaml/cdrommon/dune @@ -1,8 +1,6 @@ (executable (modes exe) (name cdrommon) - (public_name cdrommon) - (package xapi) (libraries cdrom threads @@ -11,3 +9,8 @@ ) ) +(install + (files (cdrommon.exe as cdrommon)) + (section libexec_root) + (package xapi) +) diff --git a/ocaml/database/dune b/ocaml/database/dune index 14ac44931bd..48fbd89936f 100644 --- a/ocaml/database/dune +++ b/ocaml/database/dune @@ -57,8 +57,6 @@ (executable (modes exe) (name block_device_io) - (public_name block_device_io) - (package xapi) (modules block_device_io) (libraries @@ -70,6 +68,12 @@ ) ) +(install + (package xapi) + (files (block_device_io.exe as block_device_io)) + (section libexec_root) +) + (executable (name database_server_main) (modes exe) diff --git a/ocaml/doc/dune b/ocaml/doc/dune index 7c3dbcf4f68..aa5077ef404 100644 --- a/ocaml/doc/dune +++ b/ocaml/doc/dune @@ -16,23 +16,47 @@ ) (rule - (alias jsapigen) + (aliases jsapigen xapi-doc) (deps (:x jsapi.exe) (source_tree templates) ) + (targets (dir api) branding.js) (package xapi-datamodel) (action (run %{x})) ) +(rule + (alias xapi-doc) + (package xapi) + (targets (dir jekyll)) + (action (run ../idl/json_backend/gen_json.exe -destdir jekyll)) +) + (rule (alias runtest) (deps (:x jsapi.exe) (source_tree 
templates) + (sandbox always) ) (package xapi-datamodel) (action (run %{x})) ) (data_only_dirs templates) + +(install + (package xapi) + (section share_root) + (dirs jekyll) + (files + (glob_files_rec (api/* with_prefix html/api)) + (glob_files (*.html with_prefix html)) + (glob_files (*.css with_prefix html)) + (glob_files (*.js with_prefix html)) + (glob_files ([!R]*.md with_prefix markdown)) + (glob_files *.dot) + doc-convert.sh + ) +) diff --git a/ocaml/events/dune b/ocaml/events/dune index bb2b0420399..a08f10c5615 100644 --- a/ocaml/events/dune +++ b/ocaml/events/dune @@ -2,7 +2,7 @@ (modes exe) (name event_listen) (public_name event_listen) - (package xapi) + (package xapi-debug) (libraries http_lib xapi-client diff --git a/ocaml/gencert/dune b/ocaml/gencert/dune index 66a78ca4a41..cbd5cd73ae2 100644 --- a/ocaml/gencert/dune +++ b/ocaml/gencert/dune @@ -28,8 +28,6 @@ (executable (modes exe) (name gencert) - (public_name gencert) - (package xapi) (modules gencert) (libraries astring @@ -41,6 +39,12 @@ ) ) +(install + (files (gencert.exe as gencert)) + (section libexec_root) + (package xapi) +) + (test (name test_lib) (package xapi) diff --git a/ocaml/idl/autogen/management-api.md b/ocaml/idl/autogen-static/management-api.md similarity index 100% rename from ocaml/idl/autogen/management-api.md rename to ocaml/idl/autogen-static/management-api.md diff --git a/ocaml/idl/autogen/dune b/ocaml/idl/autogen/dune deleted file mode 100644 index a423ff4a937..00000000000 --- a/ocaml/idl/autogen/dune +++ /dev/null @@ -1,6 +0,0 @@ -(alias - (name markdowngen) - (deps - (source_tree .) - ) -) diff --git a/ocaml/idl/dune b/ocaml/idl/dune index d971e6597df..84ad1c35a93 100644 --- a/ocaml/idl/dune +++ b/ocaml/idl/dune @@ -36,13 +36,29 @@ ) (rule - (alias markdowngen) + (aliases markdowngen xapi-doc) (deps (:x datamodel_main.exe) (source_tree templates) + (:md autogen-static/management-api.md) ) + (targets (dir autogen)) (package xapi-datamodel) - (action (run %{x} -closed -markdown)) + (action + (progn + (run mkdir -p autogen) + (run %{x} -closed -markdown) + (run cp %{md} autogen/management-api.md) + )) +) + +(install + (package xapi) + (section share_root) + (files + (glob_files (autogen/*.md with_prefix markdown)) + (glob_files (autogen/*.yml with_prefix markdown)) + ) ) (tests diff --git a/ocaml/license/dune b/ocaml/license/dune index e2ee71b2b3f..f37d0695981 100644 --- a/ocaml/license/dune +++ b/ocaml/license/dune @@ -14,8 +14,6 @@ (executable (modes exe) (name daily_license_check_main) - (public_name daily-license-check) - (package xapi) (modules daily_license_check_main) (libraries daily_license_check @@ -27,3 +25,8 @@ ) ) +(install + (files (daily_license_check_main.exe as daily-license-check)) + (package xapi) + (section libexec_root) +) diff --git a/ocaml/networkd/bin/dune b/ocaml/networkd/bin/dune index 2b50b1e4159..c6bfffbbf52 100644 --- a/ocaml/networkd/bin/dune +++ b/ocaml/networkd/bin/dune @@ -11,8 +11,6 @@ (executable (name networkd) - (public_name xapi-networkd) - (package xapi-networkd) (modes exe) (libraries astring @@ -45,3 +43,20 @@ (name man) (deps xcp-networkd.1) ) + +(install + (package xapi-networkd) + (section man) + (files xcp-networkd.1) +) + +(install + (package xapi-networkd) + (section sbin) + (files (networkd.exe as xcp-networkd)) +) + +(alias + (name xapi-doc) + (deps (alias man)) +) diff --git a/ocaml/perftest/dune b/ocaml/perftest/dune index eb5bb586d5c..38d7a0efd16 100644 --- a/ocaml/perftest/dune +++ b/ocaml/perftest/dune @@ -2,7 +2,7 @@ (modes exe) (name perftest) 
(public_name perftest) - (package xapi) + (package xapi-debug) (libraries http_lib diff --git a/ocaml/quicktest/dune b/ocaml/quicktest/dune index b061ff1176c..0ac6a171acd 100644 --- a/ocaml/quicktest/dune +++ b/ocaml/quicktest/dune @@ -2,7 +2,7 @@ (modes exe) (name quicktest) (public_name quicktestbin) - (package xapi) + (package xapi-debug) (libraries alcotest astring @@ -43,7 +43,6 @@ (preprocess (per_module ((pps ppx_deriving_rpc) Quicktest_vm_lifecycle))) ) - (rule (alias runtest) (package xapi) diff --git a/ocaml/rrd2csv/src/dune b/ocaml/rrd2csv/src/dune index ce263d70a01..28f26f831c2 100644 --- a/ocaml/rrd2csv/src/dune +++ b/ocaml/rrd2csv/src/dune @@ -2,7 +2,7 @@ (modes exe) (name rrd2csv) (public_name rrd2csv) - (package rrd2csv) + (package xapi) (libraries http_lib @@ -17,4 +17,3 @@ xmlm ) ) - diff --git a/ocaml/squeezed/src/dune b/ocaml/squeezed/src/dune index 4db102ad8a0..55dbbf92949 100644 --- a/ocaml/squeezed/src/dune +++ b/ocaml/squeezed/src/dune @@ -1,8 +1,6 @@ (executable (modes exe) (name squeezed) - (public_name squeezed) - (package xapi-squeezed) (flags (:standard -bin-annot)) (libraries xapi-stdext-threads @@ -30,3 +28,9 @@ re.str ) ) + +(install + (package xapi-squeezed) + (section sbin) + (files (squeezed.exe as squeezed)) +) diff --git a/ocaml/vhd-tool/cli/dune b/ocaml/vhd-tool/cli/dune index cb85ba1a1dc..99f73fa7615 100644 --- a/ocaml/vhd-tool/cli/dune +++ b/ocaml/vhd-tool/cli/dune @@ -1,8 +1,6 @@ (executables (modes exe) (names main sparse_dd get_vhd_vsize) - (package vhd-tool) - (public_names vhd-tool sparse_dd get_vhd_vsize) (libraries astring @@ -40,9 +38,33 @@ (action (with-stdout-to %{targets} (run %{x} --help))) ) +; specfile doesn't expect these +;(install +; (package vhd-tool) +; (section man) +; (files vhd-tool.1 sparse_dd.1)) + (install (package vhd-tool) - (section man) - (files vhd-tool.1 sparse_dd.1) + (section bin) + (files (main.exe as vhd-tool)) ) +(install + (package xapi) + (section libexec_root) + (files + (../scripts/get_nbd_extents.py as get_nbd_extents.py) + (../scripts/python_nbd_client.py as python_nbd_client.py) + ) +) + +; xapi's libexec is in /opt/xensource/libexec +; but vhd-tool installs into /usr/libexec/xapi +; we should eventually fix these inconsistencies, +; for now be backwards compatible +(install + (package vhd-tool) + (section libexec_root) + (files (get_vhd_vsize.exe as xapi/get_vhd_vsize) (sparse_dd.exe as xapi/sparse_dd)) +) diff --git a/ocaml/vncproxy/dune b/ocaml/vncproxy/dune index 5e6e1d768d8..97b89628334 100644 --- a/ocaml/vncproxy/dune +++ b/ocaml/vncproxy/dune @@ -2,7 +2,7 @@ (modes exe) (name vncproxy) (public_name vncproxy) - (package xapi) + (package xapi-debug) (libraries http_lib diff --git a/ocaml/wsproxy/cli/dune b/ocaml/wsproxy/cli/dune index 93984851950..65797a35c69 100644 --- a/ocaml/wsproxy/cli/dune +++ b/ocaml/wsproxy/cli/dune @@ -1,3 +1,7 @@ +; we can't install to libexec_root, because the default for that is /lib, +; so a plain dune build @install would fail because now it can no longer use /lib/wsproxy +; so we install it and then move it in the Makefile + (executable (name wsproxy) (public_name wsproxy) diff --git a/ocaml/xapi/dune b/ocaml/xapi/dune index acf3cb5ae66..9f3e5f825fa 100644 --- a/ocaml/xapi/dune +++ b/ocaml/xapi/dune @@ -49,11 +49,12 @@ ) (install - (section share) + (package xapi-debug) + (section share_root) (files rbac_static.csv) - (package xapi) ) + (library (name xapi_internal_minimal) (modules context custom_actions xapi_globs server_helpers session_check rbac rbac_audit db_actions 
taskHelper eventgen locking_helpers exnHelper rbac_static xapi_role xapi_extensions db) diff --git a/ocaml/xcp-rrdd/bin/read-blktap-stats/dune b/ocaml/xcp-rrdd/bin/read-blktap-stats/dune index 72861b33506..e7793085904 100644 --- a/ocaml/xcp-rrdd/bin/read-blktap-stats/dune +++ b/ocaml/xcp-rrdd/bin/read-blktap-stats/dune @@ -1,8 +1,9 @@ (executable (modes exe) (name read_blktap_stats) - (package rrdd-plugins) - (public_name xcp-rrdd-read-blktap-stats) +; not expected by the specfile +; (package rrdd-plugins) +; (public_name xcp-rrdd-read-blktap-stats) (libraries cstruct rrdd_plugins_libs diff --git a/ocaml/xcp-rrdd/bin/rrdd/dune b/ocaml/xcp-rrdd/bin/rrdd/dune index 9b0dbbbab29..9a0905bb27e 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/dune +++ b/ocaml/xcp-rrdd/bin/rrdd/dune @@ -38,8 +38,6 @@ (executable (modes exe) (name xcp_rrdd) - (public_name xcp-rrdd) - (package xapi-rrdd) (modules xcp_rrdd) (libraries astring @@ -75,3 +73,8 @@ ) ) +(install + (package xapi-rrdd) + (files (xcp_rrdd.exe as xcp-rrdd)) + (section sbin) +) diff --git a/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune b/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune index a6e092c3843..5dd75984e1a 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune @@ -1,8 +1,6 @@ (executable (modes exe) (name rrdp_dcmi) - (package rrdd-plugins) - (public_name xcp-rrdd-dcmi) (libraries rrdd-plugin @@ -14,3 +12,8 @@ ) ) +(install + (package rrdd-plugins) + (files (rrdp_dcmi.exe as xcp-rrdd-plugins/xcp-rrdd-dcmi)) + (section libexec_root) +) diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune index 1bf3c8ed4f0..cb8afcedf7f 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune @@ -1,8 +1,6 @@ (executable (modes exe) (name rrdp_iostat) - (package rrdd-plugins) - (public_name xcp-rrdd-iostat) (libraries astring cstruct @@ -31,3 +29,8 @@ ) ) +(install + (package rrdd-plugins) + (files (rrdp_iostat.exe as xcp-rrdd-plugins/xcp-rrdd-iostat)) + (section libexec_root) +) diff --git a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune index acad6c3dfe9..97000c2ebed 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune @@ -1,8 +1,6 @@ (executable (modes exe) (name rrdp_squeezed) - (package rrdd-plugins) - (public_name xcp-rrdd-squeezed) (libraries rrdd-plugin @@ -20,3 +18,8 @@ ) ) +(install + (package rrdd-plugins) + (files (rrdp_squeezed.exe as xcp-rrdd-plugins/xcp-rrdd-squeezed)) + (section libexec_root) +) diff --git a/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune b/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune index 86340e6796e..2545d45d9f1 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune @@ -1,8 +1,6 @@ (executable (modes exe) (name rrdp_xenpm) - (package rrdd-plugins) - (public_name xcp-rrdd-xenpm) (libraries rrdd-plugin @@ -15,3 +13,8 @@ ) ) +(install + (package rrdd-plugins) + (files (rrdp_xenpm.exe as xcp-rrdd-plugins/xcp-rrdd-xenpm)) + (section libexec_root) +) diff --git a/ocaml/xe-cli/dune b/ocaml/xe-cli/dune index 5362781b31a..9141c1fab07 100644 --- a/ocaml/xe-cli/dune +++ b/ocaml/xe-cli/dune @@ -20,4 +20,3 @@ xapi-stdext-unix ) ) - diff --git a/ocaml/xenopsd/cli/dune b/ocaml/xenopsd/cli/dune index f9cfc7d353a..87dcdf98c8a 100644 --- a/ocaml/xenopsd/cli/dune +++ b/ocaml/xenopsd/cli/dune @@ -3,8 +3,6 @@ (executable (name main) - (public_name xenops-cli) - (package xapi-xenopsd-cli) (libraries astring cmdliner @@ -28,6 +26,12 @@ (preprocess (per_module ((pps ppx_deriving_rpc) Common 
Xn_cfg_types))) ) +(install + (files (main.exe as xenops-cli)) + (section sbin) + (package xapi-xenopsd-cli) +) + (rule (with-stdout-to xenops-cli.1 diff --git a/ocaml/xenopsd/dbgring/dune b/ocaml/xenopsd/dbgring/dune index c3d29f9e5be..7e50d7f83f6 100644 --- a/ocaml/xenopsd/dbgring/dune +++ b/ocaml/xenopsd/dbgring/dune @@ -1,7 +1,7 @@ (executable (name dbgring) - (public_name dbgring) - (package xapi-xenopsd-xc) +; (public_name dbgring) +; (package xapi-xenopsd-xc) (libraries xapi_xenopsd diff --git a/ocaml/xenopsd/dune b/ocaml/xenopsd/dune deleted file mode 100644 index 389b982cc01..00000000000 --- a/ocaml/xenopsd/dune +++ /dev/null @@ -1 +0,0 @@ -(data_only_dirs scripts) diff --git a/ocaml/xenopsd/pvs/dune b/ocaml/xenopsd/pvs/dune index d8b113392c9..facd232e0f2 100644 --- a/ocaml/xenopsd/pvs/dune +++ b/ocaml/xenopsd/pvs/dune @@ -1,7 +1,13 @@ (executable (name pvs_proxy_setup) - (public_name pvs-proxy-ovs-setup) - (package xapi-xenopsd-xc) (libraries ezxenstore.core bos xapi-consts.xapi_version xapi-idl cmdliner log rresult) ) +(install + (section libexec_root) + (package xapi-xenopsd-xc) + (files + (pvs_proxy_setup.exe as pvs-proxy-ovs-setup) + ) +) + diff --git a/ocaml/xenopsd/scripts/dune b/ocaml/xenopsd/scripts/dune new file mode 100644 index 00000000000..036965c492e --- /dev/null +++ b/ocaml/xenopsd/scripts/dune @@ -0,0 +1,13 @@ +(install + (section libexec_root) + (package xapi-xenopsd-xc) + (files + (vif as vif) + (vif-real as vif-real) + (block as block) + (tap as tap) + (setup-vif-rules as setup-vif-rules) + (common.py as common.py) + (igmp_query_injector.py as igmp_query_injector.py) + ) +) diff --git a/ocaml/xenopsd/simulator/dune b/ocaml/xenopsd/simulator/dune index 9274834e964..295e1b43ab4 100644 --- a/ocaml/xenopsd/simulator/dune +++ b/ocaml/xenopsd/simulator/dune @@ -1,7 +1,5 @@ (executable (name xenops_simulator_main) - (public_name xenopsd-simulator) - (package xapi-xenopsd-simulator) (libraries xapi-idl.xen.interface @@ -9,6 +7,12 @@ ) ) +(install + (files (xenops_simulator_main.exe as xenopsd-simulator)) + (section sbin) + (package xapi-xenopsd-simulator) +) + (rule (with-stdout-to xenopsd-simulator.1 diff --git a/ocaml/xenopsd/suspend_image_viewer/dune b/ocaml/xenopsd/suspend_image_viewer/dune index 47b8ced2a92..706b58bf3f3 100644 --- a/ocaml/xenopsd/suspend_image_viewer/dune +++ b/ocaml/xenopsd/suspend_image_viewer/dune @@ -1,7 +1,7 @@ (executable (public_name suspend-image-viewer) (name suspend_image_viewer) - (package xapi-xenopsd-xc) + (package xapi-debug) (libraries cmdliner forkexec diff --git a/ocaml/xenopsd/tools/dune b/ocaml/xenopsd/tools/dune index fa6d4519b50..4736daa0972 100644 --- a/ocaml/xenopsd/tools/dune +++ b/ocaml/xenopsd/tools/dune @@ -1,6 +1,10 @@ (executable (name set_domain_uuid) - (public_name set-domain-uuid) - (package xapi-xenopsd-xc) (libraries xenctrl uuid cmdliner) ) + +(install + (files (set_domain_uuid.exe as set-domain-uuid)) + (section libexec_root) + (package xapi-xenopsd-xc) +) diff --git a/ocaml/xenopsd/xc/dune b/ocaml/xenopsd/xc/dune index 06ba1a676e5..5ad84eb8097 100644 --- a/ocaml/xenopsd/xc/dune +++ b/ocaml/xenopsd/xc/dune @@ -69,8 +69,6 @@ (executable (name xenops_xc_main) (modes exe) - (public_name xenopsd-xc) - (package xapi-xenopsd-xc) (modules xenops_xc_main) (libraries @@ -107,6 +105,12 @@ ) ) +(install + (files (xenops_xc_main.exe as xenopsd-xc)) + (section sbin) + (package xapi-xenopsd-xc) +) + (executable (name memory_summary) (modes exe) diff --git a/ocaml/xenopsd/xc/fence/dune b/ocaml/xenopsd/xc/fence/dune index 
4127982b138..48d0a6a1d0a 100644 --- a/ocaml/xenopsd/xc/fence/dune +++ b/ocaml/xenopsd/xc/fence/dune @@ -1,6 +1,10 @@ (executable (name fence) - (public_name fence.bin) - (package xapi-xenopsd-xc) (libraries xenctrl) ) + +(install + (package xapi) + (section libexec_root) + (files (fence.exe as fence.bin)) +) diff --git a/ocaml/xs-trace/dune b/ocaml/xs-trace/dune index 0be1866b2d0..4e2213ce287 100644 --- a/ocaml/xs-trace/dune +++ b/ocaml/xs-trace/dune @@ -2,7 +2,7 @@ (modes exe) (name xs_trace) (public_name xs-trace) - (package xapi) + (package xapi-xenopsd-xc) (libraries uri tracing @@ -19,8 +19,9 @@ (action (with-stdout-to %{targets} (run %{exe} --help=groff))) ) -(install - (section man) - (package xapi) - (files (xs-trace.1 as man1/xs-trace.1)) -) +; not expected by the specfile +;(install +; (section man) +; (package xapi) +; (files (xs-trace.1 as man1/xs-trace.1)) +;) diff --git a/rrd2csv.opam b/rrd2csv.opam deleted file mode 100644 index 4d71ee4468b..00000000000 --- a/rrd2csv.opam +++ /dev/null @@ -1,29 +0,0 @@ -# This file is generated by dune, edit dune-project instead - -opam-version: "2.0" -name: "rrd2csv" -maintainer: "opam-devel@lists.ocaml.org" -authors: [ "Guillem Rieu" ] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [ - ["dune" "build" "-p" name "-j" jobs ] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "http-lib" - "xapi-client" - "xapi-idl" - "xapi-rrd" - "xapi-stdext-std" - "xapi-stdext-threads" - "xmlm" -] -synopsis: "Convert XenServer RRD data into CSV format" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/rrd2csv.opam.template b/rrd2csv.opam.template deleted file mode 100644 index e86d004589e..00000000000 --- a/rrd2csv.opam.template +++ /dev/null @@ -1,27 +0,0 @@ -opam-version: "2.0" -name: "rrd2csv" -maintainer: "opam-devel@lists.ocaml.org" -authors: [ "Guillem Rieu" ] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [ - ["dune" "build" "-p" name "-j" jobs ] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "http-lib" - "xapi-client" - "xapi-idl" - "xapi-rrd" - "xapi-stdext-std" - "xapi-stdext-threads" - "xmlm" -] -synopsis: "Convert XenServer RRD data into CSV format" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-debug.opam b/xapi-debug.opam new file mode 100644 index 00000000000..5073a267be2 --- /dev/null +++ b/xapi-debug.opam @@ -0,0 +1,76 @@ +# This file is generated by dune, edit dune-project instead +opam-version: "2.0" +synopsis: "Debugging tools for XAPI" +description: + "Tools installed into the non-standard /opt/xensource/debug location" +maintainer: ["Xapi project maintainers"] +authors: ["xen-api@lists.xen.org"] +license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" +homepage: "https://xapi-project.github.io/" +bug-reports: "https://github.com/xapi-project/xen-api/issues" +depends: [ + "dune" {>= "3.15"} + "alcotest" + "angstrom" + "astring" + "base64" + "cmdliner" + "cohttp" + "cstruct" + "ctypes" + "domain-name" + "fd-send-recv" + "fmt" + "hex" + "integers" + 
"ipaddr" + "logs" + "magic-mime" + "mirage-crypto" + "mirage-crypto-pk" + "mirage-crypto-rng" + "mtime" + "pci" + "polly" + "ppx_deriving" + "ppx_deriving_rpc" + "ppx_sexp_conv" + "psq" + "ptime" + "qcheck-alcotest" + "qcheck-core" + "re" + "result" + "rpclib" + "rresult" + "sexplib" + "sexplib0" + "sha" + "tar" + "tar-unix" + "uri" + "uuidm" + "uutf" + "x509" + "xapi-backtrace" + "xenctrl" + "xenstore_transport" + "xmlm" + "yojson" + "odoc" {with-doc} +] +build: [ + ["dune" "subst"] {dev} + [ + "dune" + "build" + "-p" + name + "-j" + jobs + "@install" + "@runtest" {with-test} + "@doc" {with-doc} + ] +] +dev-repo: "git+https://github.com/xapi-project/xen-api.git" diff --git a/xapi-squeezed.opam b/xapi-squeezed.opam index 7b2de24dd18..7c4995d7e4f 100644 --- a/xapi-squeezed.opam +++ b/xapi-squeezed.opam @@ -26,7 +26,7 @@ depends: [ "xapi-stdext-threads" "xapi-stdext-unix" "xapi-types" - "xenctrl" {>= "0.9.20"} + "xenctrl" {>= "0.9.20" & with-test} "xenstore" "xenstore_transport" ] diff --git a/xapi-squeezed.opam.template b/xapi-squeezed.opam.template index 9641c69858a..aa928933c0d 100644 --- a/xapi-squeezed.opam.template +++ b/xapi-squeezed.opam.template @@ -23,7 +23,7 @@ depends: [ "xapi-stdext-threads" "xapi-stdext-unix" "xapi-types" - "xenctrl" {>= "0.9.20"} + "xenctrl" {>= "0.9.20" & with-test} "xenstore" "xenstore_transport" ] diff --git a/xapi-xenopsd-xc.opam b/xapi-xenopsd-xc.opam index 1ebd0e0073c..9b2f6771bc4 100644 --- a/xapi-xenopsd-xc.opam +++ b/xapi-xenopsd-xc.opam @@ -26,17 +26,17 @@ depends: [ "conf-xen" "ezxenstore" "fd-send-recv" - "fmt" + "fmt" {with-test} "forkexec" "inotify" "mtime" "polly" - "ppx_deriving_rpc" + "ppx_deriving_rpc" {with-test} "ppx_sexp_conv" "qmp" "re" - "result" - "rpclib" + "result" {with-test} + "rpclib" {with-test} "rresult" "sexplib" "sexplib0" @@ -55,7 +55,7 @@ depends: [ "xapi-test-utils" {with-test} "xenctrl" "xenstore" - "xenstore_transport" + "xenstore_transport" {with-test} "xenmmap" "xmlm" ] diff --git a/xapi-xenopsd-xc.opam.template b/xapi-xenopsd-xc.opam.template index e0dd497cde1..92b68a4f1a7 100644 --- a/xapi-xenopsd-xc.opam.template +++ b/xapi-xenopsd-xc.opam.template @@ -24,17 +24,17 @@ depends: [ "conf-xen" "ezxenstore" "fd-send-recv" - "fmt" + "fmt" {with-test} "forkexec" "inotify" "mtime" "polly" - "ppx_deriving_rpc" + "ppx_deriving_rpc" {with-test} "ppx_sexp_conv" "qmp" "re" - "result" - "rpclib" + "result" {with-test} + "rpclib" {with-test} "rresult" "sexplib" "sexplib0" @@ -53,7 +53,7 @@ depends: [ "xapi-test-utils" {with-test} "xenctrl" "xenstore" - "xenstore_transport" + "xenstore_transport" {with-test} "xenmmap" "xmlm" ] diff --git a/xapi.opam b/xapi.opam index e533b5127eb..3a850a4e359 100644 --- a/xapi.opam +++ b/xapi.opam @@ -10,7 +10,7 @@ homepage: "https://xapi-project.github.io/" bug-reports: "https://github.com/xapi-project/xen-api/issues" depends: [ "dune" {>= "3.15"} - "alcotest" + "alcotest" {with-test} "angstrom" "astring" "base-threads" From d35d9a635975691ed9eaeecd903486b93ebab2c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 13 Sep 2024 15:14:14 +0100 Subject: [PATCH 019/141] CP-51479: [maintenance]: merge dune rules and use -j consistently MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit compare with: ``` rm /tmp/inst/2 -rf; make build install DESTDIR=/tmp/inst/2 && (cd /tmp/inst/2 && find ! 
-type d -printf '%p %M %8s\n' ) >|this && sort -u ref >|refs && sort -u this >|thiss && diff -wu refs thiss && echo OK ``` Signed-off-by: Edwin Török --- Makefile | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/Makefile b/Makefile index 64397752bdf..086cc8ade72 100644 --- a/Makefile +++ b/Makefile @@ -15,8 +15,7 @@ build: # if available use external file, otherwise use built-in, this allows building XAPI without being root ! test -f $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml || cp $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote || dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote - dune build @install -j $(JOBS) --profile=$(PROFILE) - dune build @ocaml/xapi-storage/python/xapi/storage/api/v5/python --profile=$(PROFILE) + dune build -j $(JOBS) --profile=$(PROFILE) @install @ocaml/xapi-storage/python/xapi/storage/api/v5/python @ocaml/xapi-doc @ocaml/sdk-gen/sdkgen # Quickly verify that the code compiles, without actually building it check: @@ -69,8 +68,7 @@ test: PSTREE_SLEEP_PID=$$!; \ trap "kill $${PSTREE_SLEEP_PID}" INT TERM EXIT; \ timeout --foreground $(TEST_TIMEOUT2) \ - dune runtest --profile=$(PROFILE) --error-reporting=twice -j $(JOBS) - dune build @runtest-python --profile=$(PROFILE) + dune build --profile=$(PROFILE) --error-reporting=twice -j $(JOBS) @runtest @runtest-python stresstest: dune build @stresstest --profile=$(PROFILE) --no-buffer -j $(JOBS) @@ -107,7 +105,7 @@ format: quality-gate: ./quality-gate.sh -install: build doc sdk doc-json +install: build doc doc-json mkdir -p $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(OPTMANDIR) mkdir -p $(DESTDIR)$(LIBEXECDIR) @@ -140,29 +138,26 @@ install: build doc sdk doc-json install -D ./ocaml/xenopsd/scripts/pygrub-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/pygrub-wrapper DESTDIR=$(DESTDIR) SBINDIR=$(SBINDIR) QEMU_WRAPPER_DIR=$(QEMU_WRAPPER_DIR) XENOPSD_LIBEXECDIR=$(XENOPSD_LIBEXECDIR) ETCDIR=$(ETCDIR) ./ocaml/xenopsd/scripts/make-custom-xenopsd.conf # dune can install libraries and several other files into the right locations - dune install --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ - --libexecdir=$(XENOPSD_LIBEXECDIR) \ + dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ + --libexecdir=$(XENOPSD_LIBEXECDIR) --datadir=$(SDKDIR) \ xapi-client xapi-schema xapi-consts xapi-cli-protocol xapi-datamodel xapi-types \ xen-api-client xen-api-client-lwt xen-api-client-async rrdd-plugin rrd-transport \ gzip http-lib pciutil sexpr stunnel uuid xml-light2 zstd xapi-compression safe-resources \ message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli \ xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli xapi-xenopsd-xc\ + xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli xapi-xenopsd-xc xapi-sdk\ xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix 
xapi-stdext-zerocheck - dune install --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe rrdd-plugins - dune install --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug - dune install --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --libexecdir=/usr/libexec --mandir=$(MANDIR) vhd-tool - + dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe rrdd-plugins + dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug + dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --libexecdir=/usr/libexec --mandir=$(MANDIR) vhd-tool # wsproxy mv $(DESTDIR)/usr/bin/wsproxy $(DESTDIR)$(LIBEXECDIR)/wsproxy (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -sf pvs-proxy-ovs-setup setup-pvs-proxy-rules) -# sdk - dune install --destdir=$(DESTDIR) --datadir=$(SDKDIR) xapi-sdk chmod +x $(DESTDIR)$(DOCDIR)/doc-convert.sh # backward compat with existing specfile, to be removed after it is updated find $(DESTDIR) -name '*.cmxs' -delete - for pkg in rrdd-plugins xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f; done; done; + for pkg in rrdd-plugins xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy xapi-sdk vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f -f; done; done; uninstall: # only removes what was installed with `dune install` From 44d7afa360e5c0cb57c9a7ce9286b42a414e971f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sat, 14 Sep 2024 11:29:14 +0100 Subject: [PATCH 020/141] CP-51479: [maintenance]: parallelize install rule MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The dune build runs can't be parallelized, but the python and install.sh invocations and the `dune install` invocations can. Also remove the dependency between 'build' and 'install', if desired during development one can run `make build install`, however for the .spec build it is better to have a strict separation between build and install phases (this ensures that the install phase doesn't rebuild things by accident). 
Signed-off-by: Edwin Török --- Makefile | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 086cc8ade72..453f1064764 100644 --- a/Makefile +++ b/Makefile @@ -105,7 +105,17 @@ format: quality-gate: ./quality-gate.sh -install: build doc doc-json +.PHONY: install-scripts install-python3 install-dune1 install-dune2 install-dune3 install-dune4 install-extra + +install-scripts: + $(MAKE) -C scripts install + +install-python3: + $(MAKE) -C python3 install + +install-parallel: install-dune1 install-dune2 install-dune3 install-dune4 install-scripts install-python3 install-extra + +install-extra: mkdir -p $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(OPTMANDIR) mkdir -p $(DESTDIR)$(LIBEXECDIR) @@ -116,8 +126,6 @@ install: build doc doc-json mkdir -p $(DESTDIR)/etc mkdir -p $(DESTDIR)/etc/bash_completion.d # ocaml/xapi - $(MAKE) -C scripts install - $(MAKE) -C python3 install scripts/install.sh 755 ocaml/quicktest/quicktest $(DESTDIR)$(OPTDIR)/debug # ocaml/xe-cli ln -sf $(OPTDIR)/bin/xe $(DESTDIR)/usr/bin/xe @@ -137,6 +145,8 @@ install: build doc doc-json install -D ./ocaml/xenopsd/scripts/swtpm-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/swtpm-wrapper install -D ./ocaml/xenopsd/scripts/pygrub-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/pygrub-wrapper DESTDIR=$(DESTDIR) SBINDIR=$(SBINDIR) QEMU_WRAPPER_DIR=$(QEMU_WRAPPER_DIR) XENOPSD_LIBEXECDIR=$(XENOPSD_LIBEXECDIR) ETCDIR=$(ETCDIR) ./ocaml/xenopsd/scripts/make-custom-xenopsd.conf + +install-dune1: # dune can install libraries and several other files into the right locations dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ --libexecdir=$(XENOPSD_LIBEXECDIR) --datadir=$(SDKDIR) \ @@ -148,9 +158,18 @@ install: build doc doc-json xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli xapi-xenopsd-xc xapi-sdk\ xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck + +install-dune2: dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe rrdd-plugins + +install-dune3: dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug + +install-dune4: dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --libexecdir=/usr/libexec --mandir=$(MANDIR) vhd-tool + +install: + $(MAKE) -j $(JOBS) install-parallel # wsproxy mv $(DESTDIR)/usr/bin/wsproxy $(DESTDIR)$(LIBEXECDIR)/wsproxy (cd $(DESTDIR)/$(XENOPSD_LIBEXECDIR) && ln -sf pvs-proxy-ovs-setup setup-pvs-proxy-rules) @@ -159,6 +178,7 @@ install: build doc doc-json find $(DESTDIR) -name '*.cmxs' -delete for pkg in rrdd-plugins xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy xapi-sdk vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f -f; done; done; + uninstall: # only removes what was installed 
with `dune install` dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ From 371207df51b89c1c763a5baa13d1c910d2a689bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sun, 15 Sep 2024 11:06:47 +0100 Subject: [PATCH 021/141] CP-51479: [maintenance]: merge rrdd-plugins with xapi, and remove xapi-rrdd-plugin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit They use the same install destination. xapi-rrdd-plugin was never installed, so remove the public-name and opam package (but keep the sources, they might be useful as an example). TODO: opam dune lint Signed-off-by: Edwin Török --- Makefile | 6 ++-- dune-project | 8 ----- ocaml/xcp-rrdd/bin/read-blktap-stats/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-dcmi/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-dummy/dune | 3 +- ocaml/xcp-rrdd/bin/rrdp-iostat/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-squeezed/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-xenpm/dune | 2 +- rrdd-plugins.opam | 36 ----------------------- rrdd-plugins.opam.template | 34 --------------------- xapi-rrdd-plugin.opam | 31 ------------------- xapi-rrdd-plugin.opam.template | 15 ---------- 12 files changed, 9 insertions(+), 134 deletions(-) delete mode 100644 rrdd-plugins.opam delete mode 100644 rrdd-plugins.opam.template delete mode 100644 xapi-rrdd-plugin.opam delete mode 100644 xapi-rrdd-plugin.opam.template diff --git a/Makefile b/Makefile index 453f1064764..647427b8677 100644 --- a/Makefile +++ b/Makefile @@ -160,7 +160,7 @@ install-dune1: xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck install-dune2: - dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe rrdd-plugins + dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe install-dune3: dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug @@ -176,7 +176,7 @@ install: chmod +x $(DESTDIR)$(DOCDIR)/doc-convert.sh # backward compat with existing specfile, to be removed after it is updated find $(DESTDIR) -name '*.cmxs' -delete - for pkg in rrdd-plugins xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy xapi-sdk vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f -f; done; done; + for pkg in xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy xapi-sdk vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f -f; done; done; uninstall: @@ -190,7 +190,7 @@ uninstall: xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli\ xapi-stdext-date 
xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck - dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) xapi xe rrdd-plugins + dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) xapi xe dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(XENOPSD_LIBEXECDIR) xapi-xenopsd-xc dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug diff --git a/dune-project b/dune-project index 8aca9cfd45b..4dab0145859 100644 --- a/dune-project +++ b/dune-project @@ -37,10 +37,6 @@ ) ) -(package - (name xapi-rrdd-plugin) -) - (package (name xml-light2) ) @@ -581,10 +577,6 @@ This package provides an Lwt compatible interface to the library.") (name rrddump) ) -(package - (name rrdd-plugins) -) - (package (name rrd-transport) (synopsis "Shared-memory protocols for exposing system metrics") diff --git a/ocaml/xcp-rrdd/bin/read-blktap-stats/dune b/ocaml/xcp-rrdd/bin/read-blktap-stats/dune index e7793085904..71d116d12d6 100644 --- a/ocaml/xcp-rrdd/bin/read-blktap-stats/dune +++ b/ocaml/xcp-rrdd/bin/read-blktap-stats/dune @@ -2,7 +2,7 @@ (modes exe) (name read_blktap_stats) ; not expected by the specfile -; (package rrdd-plugins) +; (package xapi) ; (public_name xcp-rrdd-read-blktap-stats) (libraries cstruct diff --git a/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune b/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune index 5dd75984e1a..971c2b3426b 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-dcmi/dune @@ -13,7 +13,7 @@ ) (install - (package rrdd-plugins) + (package xapi) (files (rrdp_dcmi.exe as xcp-rrdd-plugins/xcp-rrdd-dcmi)) (section libexec_root) ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-dummy/dune b/ocaml/xcp-rrdd/bin/rrdp-dummy/dune index c3ff89a1c35..758f76805da 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-dummy/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-dummy/dune @@ -1,7 +1,6 @@ (executable + (name rrdp_dummy) (modes exe) - (public_name rrdp_dummy) - (package xapi-rrdd-plugin) (libraries rrdd-plugin diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune index cb8afcedf7f..4721f71aed1 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune @@ -30,7 +30,7 @@ ) (install - (package rrdd-plugins) + (package xapi) (files (rrdp_iostat.exe as xcp-rrdd-plugins/xcp-rrdd-iostat)) (section libexec_root) ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune index 97000c2ebed..d45dd928de1 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-squeezed/dune @@ -19,7 +19,7 @@ ) (install - (package rrdd-plugins) + (package xapi) (files (rrdp_squeezed.exe as xcp-rrdd-plugins/xcp-rrdd-squeezed)) (section libexec_root) ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune b/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune index 2545d45d9f1..8eb5191fbd6 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-xenpm/dune @@ -14,7 +14,7 @@ ) (install - (package rrdd-plugins) + (package xapi) (files (rrdp_xenpm.exe as xcp-rrdd-plugins/xcp-rrdd-xenpm)) (section libexec_root) ) diff --git a/rrdd-plugins.opam b/rrdd-plugins.opam deleted file mode 100644 index 4ca427e4561..00000000000 --- a/rrdd-plugins.opam +++ /dev/null @@ -1,36 +0,0 @@ -# This file is generated by dune, edit 
dune-project instead - -opam-version: "2.0" -name: "rrdd-plugins" -maintainer: "xs-devel@lists.xenserver.org" -authors: [ "xs-devel@lists.xenserver.org" ] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [[ "dune" "build" "-p" name "-j" jobs ]] -synopsis: "Plugins registering to the RRD daemon and exposing various metrics" -depends: [ - "ocaml" - "dune" {>= "3.15"} - "astring" - "base-threads" - "base-unix" - "cstruct" - "cstruct-unix" - "ezxenstore" - "inotify" - "ppx_cstruct" - "rrdd-plugin" - "stringext" - "uuid" - "xapi-stdext-std" - "xapi-stdext-unix" - "xenctrl" - "xenstore" - "xenstore_transport" - "mtime" -] -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/rrdd-plugins.opam.template b/rrdd-plugins.opam.template deleted file mode 100644 index 218b5f3c7bc..00000000000 --- a/rrdd-plugins.opam.template +++ /dev/null @@ -1,34 +0,0 @@ -opam-version: "2.0" -name: "rrdd-plugins" -maintainer: "xs-devel@lists.xenserver.org" -authors: [ "xs-devel@lists.xenserver.org" ] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [[ "dune" "build" "-p" name "-j" jobs ]] -synopsis: "Plugins registering to the RRD daemon and exposing various metrics" -depends: [ - "ocaml" - "dune" {>= "3.15"} - "astring" - "base-threads" - "base-unix" - "cstruct" - "cstruct-unix" - "ezxenstore" - "inotify" - "ppx_cstruct" - "rrdd-plugin" - "stringext" - "uuid" - "xapi-stdext-std" - "xapi-stdext-unix" - "xenctrl" - "xenstore" - "xenstore_transport" - "mtime" -] -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-rrdd-plugin.opam b/xapi-rrdd-plugin.opam deleted file mode 100644 index e102355bf9c..00000000000 --- a/xapi-rrdd-plugin.opam +++ /dev/null @@ -1,31 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -build: [ - ["dune" "subst"] {dev} - [ - "dune" - "build" - "-p" - name - "-j" - jobs - "@install" - "@runtest" {with-test} - "@doc" {with-doc} - ] -] -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://xapi-project.github.io/" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -depends: ["ocaml" "rrdd-plugin" "xenstore" "xenstore_transport" "dune" {>= "3.15"}] -synopsis: "A plugin library for the xapi performance monitoring daemon" -description: """ -This library allows one to expose a datasource which can then be -sampled by the performance monitoring daemon.""" -url { - src: - "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-rrdd-plugin.opam.template b/xapi-rrdd-plugin.opam.template deleted file mode 100644 index 0eaa9df6f8f..00000000000 --- a/xapi-rrdd-plugin.opam.template +++ /dev/null @@ -1,15 +0,0 @@ -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://xapi-project.github.io/" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -depends: ["ocaml" "rrdd-plugin" 
"xenstore" "xenstore_transport" "dune" {>= "3.15"}] -synopsis: "A plugin library for the xapi performance monitoring daemon" -description: """ -This library allows one to expose a datasource which can then be -sampled by the performance monitoring daemon.""" -url { - src: - "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} From 2f4d066c900bc4c487629220638d8f2fef371940 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Sun, 15 Sep 2024 11:11:35 +0100 Subject: [PATCH 022/141] CP-51479: [maintenance]: join CLI/daemon packages into xapi-tools MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These weren't being installed previously during a spec build, and had to be 'rm'-ed in the Makefile. Join them into a single package instead to reduce the number of opam files. Signed-off-by: Edwin Török --- Makefile | 12 +-- dune-project | 148 ++++++-------------------- ocaml/networkd/bin/dune | 4 +- ocaml/networkd/bin_db/dune | 2 +- ocaml/networkd/test/dune | 2 +- ocaml/squeezed/src/dune | 2 +- ocaml/squeezed/test/dune | 2 +- ocaml/tests/dune | 4 +- ocaml/wsproxy/cli/dune | 2 +- ocaml/wsproxy/test/dune | 2 +- ocaml/xapi-idl/rrd/dune | 4 +- ocaml/xcp-rrdd/bin/rrdd/dune | 2 +- ocaml/xcp-rrdd/bin/rrddump/dune | 2 +- ocaml/xcp-rrdd/bin/transport-rw/dune | 2 +- ocaml/xcp-rrdd/test/rrdd/dune | 2 +- ocaml/xenopsd/cli/dune | 4 +- ocaml/xenopsd/dbgring/dune | 2 +- ocaml/xenopsd/list_domains/dune | 2 +- ocaml/xenopsd/pvs/dune | 2 +- ocaml/xenopsd/scripts/dune | 2 +- ocaml/xenopsd/simulator/dune | 4 +- ocaml/xenopsd/test/dune | 4 +- ocaml/xenopsd/tools/dune | 2 +- ocaml/xenopsd/xc/dune | 4 +- ocaml/xs-trace/dune | 2 +- rrddump.opam | 16 --- rrddump.opam.template | 14 --- wsproxy.opam | 35 ------ xapi-rrd-transport-utils.opam | 34 ------ xapi-rrdd.opam | 57 ---------- xapi-squeezed.opam | 40 ------- xapi-squeezed.opam.template | 37 ------- xapi-networkd.opam => xapi-tools.opam | 34 +++--- xapi-xenopsd-cli.opam | 36 ------- xapi-xenopsd-cli.opam.template | 34 ------ xapi-xenopsd-simulator.opam | 40 ------- xapi-xenopsd-simulator.opam.template | 38 ------- xapi-xenopsd-xc.opam | 67 ------------ xapi-xenopsd-xc.opam.template | 65 ----------- 39 files changed, 85 insertions(+), 682 deletions(-) delete mode 100644 rrddump.opam delete mode 100644 rrddump.opam.template delete mode 100644 wsproxy.opam delete mode 100644 xapi-rrd-transport-utils.opam delete mode 100644 xapi-rrdd.opam delete mode 100644 xapi-squeezed.opam delete mode 100644 xapi-squeezed.opam.template rename xapi-networkd.opam => xapi-tools.opam (63%) delete mode 100644 xapi-xenopsd-cli.opam delete mode 100644 xapi-xenopsd-cli.opam.template delete mode 100644 xapi-xenopsd-simulator.opam delete mode 100644 xapi-xenopsd-simulator.opam.template delete mode 100644 xapi-xenopsd-xc.opam delete mode 100644 xapi-xenopsd-xc.opam.template diff --git a/Makefile b/Makefile index 647427b8677..29395da6340 100644 --- a/Makefile +++ b/Makefile @@ -156,8 +156,8 @@ install-dune1: message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli \ xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli xapi-xenopsd-xc xapi-sdk\ - xapi-stdext-date 
xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck + xapi-rrd xapi-inventory clock xapi-sdk\ + xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck xapi-tools install-dune2: dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe @@ -176,7 +176,7 @@ install: chmod +x $(DESTDIR)$(DOCDIR)/doc-convert.sh # backward compat with existing specfile, to be removed after it is updated find $(DESTDIR) -name '*.cmxs' -delete - for pkg in xapi-debug xapi xe xapi-networkd xapi-xenopsd-cli xapi-xenopsd-simulator xapi-xenopsd-xc xapi-squeezed xapi-rrdd xapi-rrd-transport-utils rrddump wsproxy xapi-sdk vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f -f; done; done; + for pkg in xapi-debug xapi xe xapi-tools xapi-sdk vhd-tool; do for f in CHANGELOG LICENSE README.markdown; do rm $(DESTDIR)$(OPTDIR)/doc/$$pkg/$$f $(DESTDIR)$(PREFIX)/doc/$$pkg/$$f -f; done; for f in META dune-package opam; do rm $(DESTDIR)$(LIBDIR)/$$pkg/$$f -f; done; done; uninstall: @@ -188,10 +188,10 @@ uninstall: message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-log \ xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock xapi-rrdd rrddump xapi-rrd-transport-utils wsproxy xapi-networkd xapi-squeezed xapi-xenopsd-simulator xapi-xenopsd-cli\ - xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck + xapi-rrd xapi-inventory clock xapi-sdk\ + xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck xapi-tools dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) xapi xe - dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(XENOPSD_LIBEXECDIR) xapi-xenopsd-xc + dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(XENOPSD_LIBEXECDIR) xapi-tools dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug compile_flags.txt: Makefile diff --git a/dune-project b/dune-project index 4dab0145859..09d6dc29894 100644 --- a/dune-project +++ b/dune-project @@ -89,18 +89,6 @@ (name xe) ) -(package - (name xapi-xenopsd-xc) -) - -(package - (name xapi-xenopsd-simulator) -) - -(package - (name xapi-xenopsd-cli) -) - (package (name xapi-types) ) @@ -158,10 +146,6 @@ (name xapi-storage) ) -(package - (name xapi-squeezed) -) - (package (name xapi-schema) ) @@ -186,90 +170,10 @@ ) ) -(package - (name xapi-rrdd) - (synopsis "Performance monitoring daemon for xapi") - (description "This daemon monitors 'datasources' i.e. time-varying values such as performance counters and records the samples in RRD archives. 
These archives can be used to examine historical performance trends.") - (depends - (ocaml (>= "4.02.0")) - (alcotest :with-test) - astring - cmdliner - (fmt :with-test) - (gzip (= :version)) - (http-lib (= :version)) - inotify - io-page - ipaddr - mtime - polly - ppx_deriving_rpc - rpclib - uri - (ezxenstore (= :version)) - (uuid (= :version)) - xapi-backtrace - (xapi-idl (= :version)) - (xapi-rrd (= :version)) - (xapi-stdext-threads (= :version)) - (xapi-stdext-unix (= :version)) - xapi-tracing - xenctrl - xenstore - xenstore_transport - xmlm - yojson - ) -) - -(package - (name xapi-rrd-transport-utils) - (synopsis "Shared-memory protocols for exposing performance counters") - (description "VMs running on a Xen host can use this library to expose performance counters which can be sampled by the xapi performance monitoring daemon.") - (authors "John Else") - (depends - ocaml - cmdliner - (rrd-transport (= :version)) - (xapi-idl (= :version)) - (xapi-rrd (= :version)) - ) -) - (package (name xapi-open-uri) ) -(package - (name xapi-networkd) - (authors "Jon Ludlam") - (synopsis "The XCP networking daemon") - (depends - (alcotest :with-test) - astring - base-threads - (forkexec (= :version)) - (http-lib (= :version)) - integers - mtime - netlink - re - result - rresult - rpclib - uri - (xapi-idl (= :version)) - xapi-inventory - (xapi-stdext-pervasives (= :version)) - (xapi-stdext-std (= :version)) - (xapi-stdext-threads (= :version)) - (xapi-stdext-unix (= :version)) - xapi-test-utils - (xen-api-client (= :version)) - yojson - ) -) - (package (name xapi-nbd) ) @@ -373,6 +277,37 @@ ) ) +(package + (name xapi-tools) + (synopsis "Various daemons and CLI applications required by XAPI") + (description "Includes message-switch, xenopsd, forkexecd, ...") + (depends + astring + base64 + cmdliner + cstruct-unix + fmt + logs + lwt + mtime + netlink + qmp + re + result + rpclib + rresult + uri + xenctrl + xmlm + yojson + (alcotest :with-test) + (ppx_deriving_rpc :with-test) + (qcheck-core :with-test) + (xapi-test-utils :with-test) + (xenstore_transport :with-test) + ) +) + (package (name xapi) (synopsis "The toolstack daemon which implements the XenAPI") @@ -462,23 +397,6 @@ ) ) -(package - (name wsproxy) - (synopsis "Websockets proxy for VNC traffic") - (authors "Jon Ludlam" "Marcello Seri") - (license "LGPL-2.0-only WITH OCaml-LGPL-linking-exception") - (depends - (alcotest :with-test) - (base64 (>= "3.1.0")) - fmt - logs - (lwt (>= "3.0.0")) - re - uuid - (qcheck-core :with-test) - ) -) - (package (name vhd-tool) (synopsis "Manipulate .vhd files") @@ -573,10 +491,6 @@ This package provides an Lwt compatible interface to the library.") (name safe-resources) ) -(package - (name rrddump) -) - (package (name rrd-transport) (synopsis "Shared-memory protocols for exposing system metrics") diff --git a/ocaml/networkd/bin/dune b/ocaml/networkd/bin/dune index c6bfffbbf52..9d755a10e37 100644 --- a/ocaml/networkd/bin/dune +++ b/ocaml/networkd/bin/dune @@ -45,13 +45,13 @@ ) (install - (package xapi-networkd) + (package xapi-tools) (section man) (files xcp-networkd.1) ) (install - (package xapi-networkd) + (package xapi-tools) (section sbin) (files (networkd.exe as xcp-networkd)) ) diff --git a/ocaml/networkd/bin_db/dune b/ocaml/networkd/bin_db/dune index b105b554b53..6997bd74d00 100644 --- a/ocaml/networkd/bin_db/dune +++ b/ocaml/networkd/bin_db/dune @@ -1,7 +1,7 @@ (executable (name networkd_db) (public_name networkd_db) - (package xapi-networkd) + (package xapi-tools) (modes exe) (libraries diff --git 
a/ocaml/networkd/test/dune b/ocaml/networkd/test/dune index 9d7ac2c9248..b3519ce2ec5 100644 --- a/ocaml/networkd/test/dune +++ b/ocaml/networkd/test/dune @@ -15,7 +15,7 @@ (rule (alias runtest) - (package xapi-networkd) + (package xapi-tools) (deps (:x network_test.exe) (source_tree jsonrpc_files) diff --git a/ocaml/squeezed/src/dune b/ocaml/squeezed/src/dune index 55dbbf92949..bb73a91f39d 100644 --- a/ocaml/squeezed/src/dune +++ b/ocaml/squeezed/src/dune @@ -30,7 +30,7 @@ ) (install - (package xapi-squeezed) + (package xapi-tools) (section sbin) (files (squeezed.exe as squeezed)) ) diff --git a/ocaml/squeezed/test/dune b/ocaml/squeezed/test/dune index a7bfdecca92..4d505fc5433 100644 --- a/ocaml/squeezed/test/dune +++ b/ocaml/squeezed/test/dune @@ -1,6 +1,6 @@ (test (name squeeze_test_main) - (package xapi-squeezed) + (package xapi-tools) (flags (:standard -bin-annot)) (libraries alcotest diff --git a/ocaml/tests/dune b/ocaml/tests/dune index 818d9288e70..b51bbca8b80 100644 --- a/ocaml/tests/dune +++ b/ocaml/tests/dune @@ -186,7 +186,7 @@ (rule (deps ../xenopsd/xc/xenops_xc_main.exe) (target xenops_xc_main.disasm) - (package xapi-xenopsd-xc) + (package xapi-tools) (action (with-stdout-to %{target} (run objdump %{deps} --wide -d --no-show-raw-insn) @@ -202,7 +202,7 @@ ) (rule (alias runtest) - (package xapi-xenopsd-xc) + (package xapi-tools) (deps (:script ./unix_select.gawk) (:disasm xenops_xc_main.disasm)) (action (run gawk -f ./%{script} %{disasm})) ) diff --git a/ocaml/wsproxy/cli/dune b/ocaml/wsproxy/cli/dune index 65797a35c69..4d6e72bfe7d 100644 --- a/ocaml/wsproxy/cli/dune +++ b/ocaml/wsproxy/cli/dune @@ -5,7 +5,7 @@ (executable (name wsproxy) (public_name wsproxy) - (package wsproxy) + (package xapi-tools) (libraries fmt logs diff --git a/ocaml/wsproxy/test/dune b/ocaml/wsproxy/test/dune index fafcac25646..0def0c88ccb 100644 --- a/ocaml/wsproxy/test/dune +++ b/ocaml/wsproxy/test/dune @@ -1,6 +1,6 @@ (test (name wsproxy_tests) (modes exe) - (package wsproxy) + (package xapi-tools) (libraries alcotest qcheck-core wslib) ) diff --git a/ocaml/xapi-idl/rrd/dune b/ocaml/xapi-idl/rrd/dune index 8a427a965e3..f7b2a8e7b70 100644 --- a/ocaml/xapi-idl/rrd/dune +++ b/ocaml/xapi-idl/rrd/dune @@ -44,7 +44,7 @@ (executable (name rrd_cli) (public_name rrd-cli) - (package xapi-rrdd) + (package xapi-tools) (modules rrd_cli) (modes exe) (libraries @@ -60,6 +60,6 @@ (rule (alias runtest) (deps (:x rrd_cli.exe)) - (package xapi-rrdd) + (package xapi-tools) (action (run %{x} --help=plain))) diff --git a/ocaml/xcp-rrdd/bin/rrdd/dune b/ocaml/xcp-rrdd/bin/rrdd/dune index 9a0905bb27e..c31182e4142 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/dune +++ b/ocaml/xcp-rrdd/bin/rrdd/dune @@ -74,7 +74,7 @@ ) (install - (package xapi-rrdd) + (package xapi-tools) (files (xcp_rrdd.exe as xcp-rrdd)) (section sbin) ) diff --git a/ocaml/xcp-rrdd/bin/rrddump/dune b/ocaml/xcp-rrdd/bin/rrddump/dune index 0e79375137d..71c62c06db3 100644 --- a/ocaml/xcp-rrdd/bin/rrddump/dune +++ b/ocaml/xcp-rrdd/bin/rrddump/dune @@ -9,6 +9,6 @@ xapi-rrd.unix xmlm ) - (package rrddump) + (package xapi-tools) ) diff --git a/ocaml/xcp-rrdd/bin/transport-rw/dune b/ocaml/xcp-rrdd/bin/transport-rw/dune index 1b933823051..b080d67bd84 100644 --- a/ocaml/xcp-rrdd/bin/transport-rw/dune +++ b/ocaml/xcp-rrdd/bin/transport-rw/dune @@ -2,7 +2,7 @@ (modes exe) (names reader writer) (public_names rrdreader rrdwriter) - (package xapi-rrd-transport-utils) + (package xapi-tools) (libraries cmdliner diff --git a/ocaml/xcp-rrdd/test/rrdd/dune b/ocaml/xcp-rrdd/test/rrdd/dune 
index bf654c0e66f..77fc26aea49 100644 --- a/ocaml/xcp-rrdd/test/rrdd/dune +++ b/ocaml/xcp-rrdd/test/rrdd/dune @@ -1,7 +1,7 @@ (test (name test_rrdd_monitor) (modes exe) - (package xapi-rrdd) + (package xapi-tools) (libraries alcotest diff --git a/ocaml/xenopsd/cli/dune b/ocaml/xenopsd/cli/dune index 87dcdf98c8a..9b4b9baa7d6 100644 --- a/ocaml/xenopsd/cli/dune +++ b/ocaml/xenopsd/cli/dune @@ -29,7 +29,7 @@ (install (files (main.exe as xenops-cli)) (section sbin) - (package xapi-xenopsd-cli) + (package xapi-tools) ) (rule @@ -49,5 +49,5 @@ (install (section man) (files xenops-cli.1.gz) - (package xapi-xenopsd-cli) + (package xapi-tools) ) diff --git a/ocaml/xenopsd/dbgring/dune b/ocaml/xenopsd/dbgring/dune index 7e50d7f83f6..b9d4773b34e 100644 --- a/ocaml/xenopsd/dbgring/dune +++ b/ocaml/xenopsd/dbgring/dune @@ -1,7 +1,7 @@ (executable (name dbgring) ; (public_name dbgring) -; (package xapi-xenopsd-xc) +; (package xapi-tools) (libraries xapi_xenopsd diff --git a/ocaml/xenopsd/list_domains/dune b/ocaml/xenopsd/list_domains/dune index be8407cb32d..4cf065125c9 100644 --- a/ocaml/xenopsd/list_domains/dune +++ b/ocaml/xenopsd/list_domains/dune @@ -1,6 +1,6 @@ (executable (name list_domains) (public_name list_domains) - (package xapi-xenopsd-xc) + (package xapi-tools) (libraries xenctrl xapi-idl.memory ezxenstore.watch uuid) ) diff --git a/ocaml/xenopsd/pvs/dune b/ocaml/xenopsd/pvs/dune index facd232e0f2..bbd88cbb772 100644 --- a/ocaml/xenopsd/pvs/dune +++ b/ocaml/xenopsd/pvs/dune @@ -5,7 +5,7 @@ (install (section libexec_root) - (package xapi-xenopsd-xc) + (package xapi-tools) (files (pvs_proxy_setup.exe as pvs-proxy-ovs-setup) ) diff --git a/ocaml/xenopsd/scripts/dune b/ocaml/xenopsd/scripts/dune index 036965c492e..b58989d5a4d 100644 --- a/ocaml/xenopsd/scripts/dune +++ b/ocaml/xenopsd/scripts/dune @@ -1,6 +1,6 @@ (install (section libexec_root) - (package xapi-xenopsd-xc) + (package xapi-tools) (files (vif as vif) (vif-real as vif-real) diff --git a/ocaml/xenopsd/simulator/dune b/ocaml/xenopsd/simulator/dune index 295e1b43ab4..3d6248ff6b5 100644 --- a/ocaml/xenopsd/simulator/dune +++ b/ocaml/xenopsd/simulator/dune @@ -10,7 +10,7 @@ (install (files (xenops_simulator_main.exe as xenopsd-simulator)) (section sbin) - (package xapi-xenopsd-simulator) + (package xapi-tools) ) (rule @@ -30,5 +30,5 @@ (install (section man) (files xenopsd-simulator.1.gz) - (package xapi-xenopsd-simulator) + (package xapi-tools) ) diff --git a/ocaml/xenopsd/test/dune b/ocaml/xenopsd/test/dune index 2d3d34cc709..eb68e8ed393 100644 --- a/ocaml/xenopsd/test/dune +++ b/ocaml/xenopsd/test/dune @@ -1,7 +1,7 @@ (test (name test) (modes exe) - (package xapi-xenopsd-xc) + (package xapi-tools) (libraries alcotest cpuid @@ -26,7 +26,7 @@ (rule (alias runtest) - (package xapi-xenopsd-simulator) + (package xapi-tools) (deps (:x ../simulator/xenops_simulator_main.exe) ) diff --git a/ocaml/xenopsd/tools/dune b/ocaml/xenopsd/tools/dune index 4736daa0972..cdd062604df 100644 --- a/ocaml/xenopsd/tools/dune +++ b/ocaml/xenopsd/tools/dune @@ -6,5 +6,5 @@ (install (files (set_domain_uuid.exe as set-domain-uuid)) (section libexec_root) - (package xapi-xenopsd-xc) + (package xapi-tools) ) diff --git a/ocaml/xenopsd/xc/dune b/ocaml/xenopsd/xc/dune index 5ad84eb8097..1ee8a87e6e5 100644 --- a/ocaml/xenopsd/xc/dune +++ b/ocaml/xenopsd/xc/dune @@ -108,7 +108,7 @@ (install (files (xenops_xc_main.exe as xenopsd-xc)) (section sbin) - (package xapi-xenopsd-xc) + (package xapi-tools) ) (executable @@ -169,5 +169,5 @@ (install (section man) (files 
xenopsd-xc.1.gz) - (package xapi-xenopsd-xc) + (package xapi-tools) ) diff --git a/ocaml/xs-trace/dune b/ocaml/xs-trace/dune index 4e2213ce287..e34fc7e5575 100644 --- a/ocaml/xs-trace/dune +++ b/ocaml/xs-trace/dune @@ -2,7 +2,7 @@ (modes exe) (name xs_trace) (public_name xs-trace) - (package xapi-xenopsd-xc) + (package xapi-tools) (libraries uri tracing diff --git a/rrddump.opam b/rrddump.opam deleted file mode 100644 index 84464b418d3..00000000000 --- a/rrddump.opam +++ /dev/null @@ -1,16 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -opam-version: "2.0" -synopsis: "Tool to dump RRD contents to XML format" -description: "Tool to dump RRD contents to XML format" -maintainer: "xen-api@lists.xen.org" -authors: "John Else" -tags: "org:xapi-project" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: ["rrd-transport" "xapi-rrd" "xmlm" "dune" {>= "3.15"}] -build: ["dune" "build" "-p" name "-j" jobs] -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/rrddump.opam.template b/rrddump.opam.template deleted file mode 100644 index 1237bb624d5..00000000000 --- a/rrddump.opam.template +++ /dev/null @@ -1,14 +0,0 @@ -opam-version: "2.0" -synopsis: "Tool to dump RRD contents to XML format" -description: "Tool to dump RRD contents to XML format" -maintainer: "xen-api@lists.xen.org" -authors: "John Else" -tags: "org:xapi-project" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: ["rrd-transport" "xapi-rrd" "xmlm" "dune" {>= "3.15"}] -build: ["dune" "build" "-p" name "-j" jobs] -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/wsproxy.opam b/wsproxy.opam deleted file mode 100644 index 0d9e79c096c..00000000000 --- a/wsproxy.opam +++ /dev/null @@ -1,35 +0,0 @@ -# This file is generated by dune, edit dune-project instead -opam-version: "2.0" -synopsis: "Websockets proxy for VNC traffic" -maintainer: ["Xapi project maintainers"] -authors: ["Jon Ludlam" "Marcello Seri"] -license: "LGPL-2.0-only WITH OCaml-LGPL-linking-exception" -homepage: "https://xapi-project.github.io/" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: [ - "dune" {>= "3.15"} - "alcotest" {with-test} - "base64" {>= "3.1.0"} - "fmt" - "logs" - "lwt" {>= "3.0.0"} - "re" - "uuid" - "qcheck-core" {with-test} - "odoc" {with-doc} -] -build: [ - ["dune" "subst"] {dev} - [ - "dune" - "build" - "-p" - name - "-j" - jobs - "@install" - "@runtest" {with-test} - "@doc" {with-doc} - ] -] -dev-repo: "git+https://github.com/xapi-project/xen-api.git" diff --git a/xapi-rrd-transport-utils.opam b/xapi-rrd-transport-utils.opam deleted file mode 100644 index 754b956f157..00000000000 --- a/xapi-rrd-transport-utils.opam +++ /dev/null @@ -1,34 +0,0 @@ -# This file is generated by dune, edit dune-project instead -opam-version: "2.0" -synopsis: "Shared-memory protocols for exposing performance counters" -description: - "VMs running on a Xen host can use this library to expose performance counters which can be sampled by the xapi performance monitoring daemon." 
-maintainer: ["Xapi project maintainers"] -authors: ["John Else"] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://xapi-project.github.io/" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: [ - "dune" {>= "3.15"} - "ocaml" - "cmdliner" - "rrd-transport" {= version} - "xapi-idl" {= version} - "xapi-rrd" {= version} - "odoc" {with-doc} -] -build: [ - ["dune" "subst"] {dev} - [ - "dune" - "build" - "-p" - name - "-j" - jobs - "@install" - "@runtest" {with-test} - "@doc" {with-doc} - ] -] -dev-repo: "git+https://github.com/xapi-project/xen-api.git" diff --git a/xapi-rrdd.opam b/xapi-rrdd.opam deleted file mode 100644 index 62d448f2869..00000000000 --- a/xapi-rrdd.opam +++ /dev/null @@ -1,57 +0,0 @@ -# This file is generated by dune, edit dune-project instead -opam-version: "2.0" -synopsis: "Performance monitoring daemon for xapi" -description: - "This daemon monitors 'datasources' i.e. time-varying values such as performance counters and records the samples in RRD archives. These archives can be used to examine historical performance trends." -maintainer: ["Xapi project maintainers"] -authors: ["xen-api@lists.xen.org"] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://xapi-project.github.io/" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -depends: [ - "dune" {>= "3.15"} - "ocaml" {>= "4.02.0"} - "alcotest" {with-test} - "astring" - "cmdliner" - "fmt" {with-test} - "gzip" {= version} - "http-lib" {= version} - "inotify" - "io-page" - "ipaddr" - "mtime" - "polly" - "ppx_deriving_rpc" - "rpclib" - "uri" - "ezxenstore" {= version} - "uuid" {= version} - "xapi-backtrace" - "xapi-idl" {= version} - "xapi-rrd" {= version} - "xapi-stdext-threads" {= version} - "xapi-stdext-unix" {= version} - "xapi-tracing" - "xenctrl" - "xenstore" - "xenstore_transport" - "xmlm" - "yojson" - "odoc" {with-doc} -] -build: [ - ["dune" "subst"] {dev} - [ - "dune" - "build" - "-p" - name - "-j" - jobs - "@install" - "@runtest" {with-test} - "@doc" {with-doc} - ] -] -dev-repo: "git+https://github.com/xapi-project/xen-api.git" diff --git a/xapi-squeezed.opam b/xapi-squeezed.opam deleted file mode 100644 index 7c4995d7e4f..00000000000 --- a/xapi-squeezed.opam +++ /dev/null @@ -1,40 +0,0 @@ -# This file is generated by dune, edit dune-project instead -authors: ["xen-api@lists.xen.org"] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "build" "-p" name "-j" jobs "@runtest"] {with-test} -] -depends: [ - "ocaml" - "alcotest" {with-test} - "astring" - "cohttp" {>= "0.11.0"} - "dune" {>= "3.15"} - "re" - "rpclib" - "uri" - "uuid" - "xapi-idl" - "xapi-log" - "xapi-stdext-pervasives" - "xapi-stdext-threads" - "xapi-stdext-unix" - "xapi-types" - "xenctrl" {>= "0.9.20" & with-test} - "xenstore" - "xenstore_transport" -] -synopsis: "A memory ballooning daemon for the Xen hypervisor" -description: """ -The squeezed daemon shares host memory among running VMs using the -balloon drivers to move memory.""" -url { - src: - "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-squeezed.opam.template b/xapi-squeezed.opam.template deleted file mode 100644 index aa928933c0d..00000000000 --- 
a/xapi-squeezed.opam.template +++ /dev/null @@ -1,37 +0,0 @@ -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "build" "-p" name "-j" jobs "@runtest"] {with-test} -] -depends: [ - "ocaml" - "alcotest" {with-test} - "astring" - "cohttp" {>= "0.11.0"} - "dune" {>= "3.15"} - "re" - "rpclib" - "uri" - "uuid" - "xapi-idl" - "xapi-log" - "xapi-stdext-pervasives" - "xapi-stdext-threads" - "xapi-stdext-unix" - "xapi-types" - "xenctrl" {>= "0.9.20" & with-test} - "xenstore" - "xenstore_transport" -] -synopsis: "A memory ballooning daemon for the Xen hypervisor" -description: """ -The squeezed daemon shares host memory among running VMs using the -balloon drivers to move memory.""" -url { - src: - "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-networkd.opam b/xapi-tools.opam similarity index 63% rename from xapi-networkd.opam rename to xapi-tools.opam index a7df883bc7d..62a7bba7c8c 100644 --- a/xapi-networkd.opam +++ b/xapi-tools.opam @@ -1,35 +1,37 @@ # This file is generated by dune, edit dune-project instead opam-version: "2.0" -synopsis: "The XCP networking daemon" +synopsis: "Various daemons and CLI applications required by XAPI" +description: "Includes message-switch, xenopsd, forkexecd, ..." maintainer: ["Xapi project maintainers"] -authors: ["Jon Ludlam"] +authors: ["xen-api@lists.xen.org"] license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" homepage: "https://xapi-project.github.io/" bug-reports: "https://github.com/xapi-project/xen-api/issues" depends: [ "dune" {>= "3.15"} - "alcotest" {with-test} "astring" - "base-threads" - "forkexec" {= version} - "http-lib" {= version} - "integers" + "base64" + "cmdliner" + "cstruct-unix" + "fmt" + "logs" + "lwt" "mtime" "netlink" + "qmp" "re" "result" - "rresult" "rpclib" + "rresult" "uri" - "xapi-idl" {= version} - "xapi-inventory" - "xapi-stdext-pervasives" {= version} - "xapi-stdext-std" {= version} - "xapi-stdext-threads" {= version} - "xapi-stdext-unix" {= version} - "xapi-test-utils" - "xen-api-client" {= version} + "xenctrl" + "xmlm" "yojson" + "alcotest" {with-test} + "ppx_deriving_rpc" {with-test} + "qcheck-core" {with-test} + "xapi-test-utils" {with-test} + "xenstore_transport" {with-test} "odoc" {with-doc} ] build: [ diff --git a/xapi-xenopsd-cli.opam b/xapi-xenopsd-cli.opam deleted file mode 100644 index dfd3eab41f8..00000000000 --- a/xapi-xenopsd-cli.opam +++ /dev/null @@ -1,36 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -authors: [ "xen-api@lists.xen.org" ] -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "astring" - "base-threads" - "cmdliner" - "ppx_deriving_rpc" - "re" - "result" - "rpclib" - "rresult" - "uuid" - "uuidm" - "xapi-idl" - "xenstore_transport" {with-test} - "yojson" -] -synopsis: "A simple command-line tool for interacting with xenopsd" -description: """ -A simple command-line tool for interacting with xenopsd -""" -url { - 
src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-xenopsd-cli.opam.template b/xapi-xenopsd-cli.opam.template deleted file mode 100644 index da363888e1b..00000000000 --- a/xapi-xenopsd-cli.opam.template +++ /dev/null @@ -1,34 +0,0 @@ -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -authors: [ "xen-api@lists.xen.org" ] -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "astring" - "base-threads" - "cmdliner" - "ppx_deriving_rpc" - "re" - "result" - "rpclib" - "rresult" - "uuid" - "uuidm" - "xapi-idl" - "xenstore_transport" {with-test} - "yojson" -] -synopsis: "A simple command-line tool for interacting with xenopsd" -description: """ -A simple command-line tool for interacting with xenopsd -""" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-xenopsd-simulator.opam b/xapi-xenopsd-simulator.opam deleted file mode 100644 index 16017fb218a..00000000000 --- a/xapi-xenopsd-simulator.opam +++ /dev/null @@ -1,40 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -opam-version: "2.0" -name: "xapi-xenopsd-simulator" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -build: [ - ["./configure"] - [ "dune" "build" "-p" name "-j" jobs ] -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "base-unix" - "astring" - "cohttp" - "fd-send-recv" - "fmt" - "ppx_deriving_rpc" - "ppx_sexp_conv" - "re" - "result" - "rpclib" - "rresult" - "sexplib" - "sexplib0" - "uri" - "uuidm" - "uutf" - "xapi-backtrace" - "xmlm" -] -synopsis: - "Simulation backend allowing testing of the higher-level xenops logic" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-xenopsd-simulator.opam.template b/xapi-xenopsd-simulator.opam.template deleted file mode 100644 index a11d9c74c19..00000000000 --- a/xapi-xenopsd-simulator.opam.template +++ /dev/null @@ -1,38 +0,0 @@ -opam-version: "2.0" -name: "xapi-xenopsd-simulator" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -build: [ - ["./configure"] - [ "dune" "build" "-p" name "-j" jobs ] -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "base-unix" - "astring" - "cohttp" - "fd-send-recv" - "fmt" - "ppx_deriving_rpc" - "ppx_sexp_conv" - "re" - "result" - "rpclib" - "rresult" - "sexplib" - "sexplib0" - "uri" - "uuidm" - "uutf" - "xapi-backtrace" - "xmlm" -] -synopsis: - "Simulation backend allowing testing of the higher-level xenops logic" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-xenopsd-xc.opam b/xapi-xenopsd-xc.opam deleted file mode 100644 index 9b2f6771bc4..00000000000 --- a/xapi-xenopsd-xc.opam +++ /dev/null @@ -1,67 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" 
-opam-version: "2.0" -name: "xapi-xenopsd-xc" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -build: [ - ["./configure"] - [ "dune" "build" "-p" name "-j" jobs ] - [ "dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "alcotest" {with-test} - "astring" - "base-threads" - "base-unix" - "base64" - "bos" - "cmdliner" - "cohttp" - "conf-xen" - "ezxenstore" - "fd-send-recv" - "fmt" {with-test} - "forkexec" - "inotify" - "mtime" - "polly" - "ppx_deriving_rpc" {with-test} - "ppx_sexp_conv" - "qmp" - "re" - "result" {with-test} - "rpclib" {with-test} - "rresult" - "sexplib" - "sexplib0" - "uri" - "uuid" - "uuidm" - "uutf" - "xapi-backtrace" - "xapi-idl" - "xapi-rrd" - "xapi-stdext-date" - "xapi-stdext-pervasives" - "xapi-stdext-std" - "xapi-stdext-threads" - "xapi-stdext-unix" - "xapi-test-utils" {with-test} - "xenctrl" - "xenstore" - "xenstore_transport" {with-test} - "xenmmap" - "xmlm" -] -synopsis: - "A xenops plugin which knows how to use xenstore, xenctrl and xenguest to manage" -description: "VMs on a xen host." -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xapi-xenopsd-xc.opam.template b/xapi-xenopsd-xc.opam.template deleted file mode 100644 index 92b68a4f1a7..00000000000 --- a/xapi-xenopsd-xc.opam.template +++ /dev/null @@ -1,65 +0,0 @@ -opam-version: "2.0" -name: "xapi-xenopsd-xc" -maintainer: "xen-api@lists.xen.org" -authors: "xen-api@lists.xen.org" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -build: [ - ["./configure"] - [ "dune" "build" "-p" name "-j" jobs ] - [ "dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "alcotest" {with-test} - "astring" - "base-threads" - "base-unix" - "base64" - "bos" - "cmdliner" - "cohttp" - "conf-xen" - "ezxenstore" - "fd-send-recv" - "fmt" {with-test} - "forkexec" - "inotify" - "mtime" - "polly" - "ppx_deriving_rpc" {with-test} - "ppx_sexp_conv" - "qmp" - "re" - "result" {with-test} - "rpclib" {with-test} - "rresult" - "sexplib" - "sexplib0" - "uri" - "uuid" - "uuidm" - "uutf" - "xapi-backtrace" - "xapi-idl" - "xapi-rrd" - "xapi-stdext-date" - "xapi-stdext-pervasives" - "xapi-stdext-std" - "xapi-stdext-threads" - "xapi-stdext-unix" - "xapi-test-utils" {with-test} - "xenctrl" - "xenstore" - "xenstore_transport" {with-test} - "xenmmap" - "xmlm" -] -synopsis: - "A xenops plugin which knows how to use xenstore, xenctrl and xenguest to manage" -description: "VMs on a xen host." 
-url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} From 89563777283d41a73780ddd60bfebef4dd5cd2a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Mon, 16 Sep 2024 16:24:19 +0100 Subject: [PATCH 023/141] CP-51479: [maintenance]: use common dune install and uninstall variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- Makefile | 52 +++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/Makefile b/Makefile index 29395da6340..3717f485b04 100644 --- a/Makefile +++ b/Makefile @@ -146,27 +146,37 @@ install-extra: install -D ./ocaml/xenopsd/scripts/pygrub-wrapper $(DESTDIR)/$(QEMU_WRAPPER_DIR)/pygrub-wrapper DESTDIR=$(DESTDIR) SBINDIR=$(SBINDIR) QEMU_WRAPPER_DIR=$(QEMU_WRAPPER_DIR) XENOPSD_LIBEXECDIR=$(XENOPSD_LIBEXECDIR) ETCDIR=$(ETCDIR) ./ocaml/xenopsd/scripts/make-custom-xenopsd.conf +# common flags and packages for 'dune install' and 'dune uninstall' +DUNE_IU_PACKAGES1=-j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) +DUNE_IU_PACKAGES1+=--libexecdir=$(XENOPSD_LIBEXECDIR) --datadir=$(SDKDIR) +DUNE_IU_PACKAGES1+=xapi-client xapi-schema xapi-consts xapi-cli-protocol xapi-datamodel xapi-types +DUNE_IU_PACKAGES1+=xen-api-client xen-api-client-lwt xen-api-client-async rrdd-plugin rrd-transport +DUNE_IU_PACKAGES1+=gzip http-lib pciutil sexpr stunnel uuid xml-light2 zstd xapi-compression safe-resources +DUNE_IU_PACKAGES1+=message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt +DUNE_IU_PACKAGES1+=message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli +DUNE_IU_PACKAGES1+=xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix +DUNE_IU_PACKAGES1+=xapi-rrd xapi-inventory clock xapi-sdk +DUNE_IU_PACKAGES1+=xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck xapi-tools + + install-dune1: # dune can install libraries and several other files into the right locations - dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ - --libexecdir=$(XENOPSD_LIBEXECDIR) --datadir=$(SDKDIR) \ - xapi-client xapi-schema xapi-consts xapi-cli-protocol xapi-datamodel xapi-types \ - xen-api-client xen-api-client-lwt xen-api-client-async rrdd-plugin rrd-transport \ - gzip http-lib pciutil sexpr stunnel uuid xml-light2 zstd xapi-compression safe-resources \ - message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ - message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli \ - xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock xapi-sdk\ - xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck xapi-tools + dune install $(DUNE_IU_PACKAGES1) +DUNE_IU_PACKAGES2=-j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe + install-dune2: - dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec 
--datadir=$(DOCDIR) xapi xe + dune install $(DUNE_IU_PACKAGES2) + +DUNE_IU_PACKAGES3=-j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug install-dune3: - dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug + dune install $(DUNE_IU_PACKAGES3) + +DUNE_IU_PACKAGES4=-j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --libexecdir=/usr/libexec --mandir=$(MANDIR) vhd-tool install-dune4: - dune install -j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --libexecdir=/usr/libexec --mandir=$(MANDIR) vhd-tool + dune install $(DUNE_IU_PACKAGES4) install: $(MAKE) -j $(JOBS) install-parallel @@ -181,18 +191,10 @@ install: uninstall: # only removes what was installed with `dune install` - dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) \ - xapi-client xapi-schema xapi-consts xapi-cli-protocol xapi-datamodel xapi-types \ - xen-api-client xen-api-client-lwt xen-api-client-async rrdd-plugin rrd-transport \ - gzip http-lib pciutil sexpr stunnel uuid xml-light2 zstd xapi-compression safe-resources \ - message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt \ - message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-log \ - xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix \ - xapi-rrd xapi-inventory clock xapi-sdk\ - xapi-stdext-date xapi-stdext-encodings xapi-stdext-pervasives xapi-stdext-std xapi-stdext-threads xapi-stdext-unix xapi-stdext-zerocheck xapi-tools - dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) xapi xe - dune uninstall --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(XENOPSD_LIBEXECDIR) xapi-tools - dune uninstall --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --bindir=$(OPTDIR)/debug --datadir=$(OPTDIR)/debug xapi-debug + dune uninstall $(DUNE_IU_PACKAGES1) + dune uninstall $(DUNE_IU_PACKAGES2) + dune uninstall $(DUNE_IU_PACKAGES3) + dune uninstall $(DUNE_IU_PACKAGES4) compile_flags.txt: Makefile (ocamlc -config-var ocamlc_cflags;\ From 4ce9906d9a3dfc745959fc8960998bcb6c1868b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 18 Sep 2024 09:51:28 +0100 Subject: [PATCH 024/141] CP-51479: [maintenance]: use 'if' instead of boolean logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improves: 1bb454377364 ("CP-51479: [maintenance]: install SDK files using dune rules") Signed-off-by: Edwin Török --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3717f485b04..186b6c3e92f 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ OPTMANDIR ?= $(OPTDIR)/man/man1/ build: [ -z "${XAPI_VERSION}" ] || (sed -i '/(version.*)/d' dune-project && echo "(version ${XAPI_VERSION})" >> dune-project) # if available use external file, otherwise use built-in, this allows building XAPI without being root - ! 
test -f $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml || cp $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml + if test -f $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml; then cp $(SHAREDIR)/sm/XE_SR_ERRORCODES.xml ocaml/sdk-gen/csharp/XE_SR_ERRORCODES.xml; fi dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote || dune build @ocaml/idl/update-dm-lifecycle -j $(JOBS) --profile=$(PROFILE) --auto-promote dune build -j $(JOBS) --profile=$(PROFILE) @install @ocaml/xapi-storage/python/xapi/storage/api/v5/python @ocaml/xapi-doc @ocaml/sdk-gen/sdkgen From 0a8fc6e64b0c59a8cb2d5ddf6d9fba68e9d0e7b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 19 Sep 2024 18:38:56 +0100 Subject: [PATCH 025/141] CP-51479: [maintenance]: fix opam dependencies after epoll merge MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit xs-opam build was failing due to missing dep between ezxenstore and xapi-stext-unix. opam-dune-lint didn't spot this one. Signed-off-by: Edwin Török --- xapi-datamodel.opam | 1 + xapi-datamodel.opam.template | 1 + 2 files changed, 2 insertions(+) diff --git a/xapi-datamodel.opam b/xapi-datamodel.opam index 4bc3b8ab90f..5925986447d 100644 --- a/xapi-datamodel.opam +++ b/xapi-datamodel.opam @@ -13,6 +13,7 @@ build: [ depends: [ "ocaml" "dune" {>= "3.15"} + "astring" {with-test} "mustache" "ppx_deriving_rpc" "rpclib" diff --git a/xapi-datamodel.opam.template b/xapi-datamodel.opam.template index aa34087cffc..22c306da48c 100644 --- a/xapi-datamodel.opam.template +++ b/xapi-datamodel.opam.template @@ -11,6 +11,7 @@ build: [ depends: [ "ocaml" "dune" {>= "3.15"} + "astring" {with-test} "mustache" "ppx_deriving_rpc" "rpclib" From b17a9424d209f389bbb77f75b9eccd50b766670e Mon Sep 17 00:00:00 2001 From: Mark Syms Date: Fri, 20 Sep 2024 13:37:49 +0100 Subject: [PATCH 026/141] Extend the Data.stat status to include `complete` Signed-off-by: Mark Syms --- ocaml/xapi-storage/generator/lib/data.ml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi-storage/generator/lib/data.ml b/ocaml/xapi-storage/generator/lib/data.ml index 7aa66db8909..36b5d20f87d 100644 --- a/ocaml/xapi-storage/generator/lib/data.ml +++ b/ocaml/xapi-storage/generator/lib/data.ml @@ -240,6 +240,7 @@ module Data (R : RPC) = struct open R type copy_operation_v1 = string [@@deriving rpcty] + type mirror_operation_v1 = string [@@deriving rpcty] (** The primary key for referring to a long-running operation. *) @@ -259,7 +260,10 @@ module Data (R : RPC) = struct type status = { failed: bool (** [failed] will be set to true if the operation has failed for some - reason. *) + reason. *) + ; complete: bool + (** [complete] will be set true if the operation is complete, whether + successfully or not, see [failed]. *) ; progress: float option (** [progress] will be returned for a copy operation, and ranges between 0 and 1. *) From 48f12a7edf7e76fc98120fb84b1490b8ba2cbfeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 20 Sep 2024 09:13:48 +0100 Subject: [PATCH 027/141] CP-51479: [maintenance]: fix opam dependency on rrd-transport MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This doesn't appear to be epoll related but causes an xs-opam failure. 
Signed-off-by: Edwin Török --- dune-project | 4 ++++ xapi-tools.opam | 1 + 2 files changed, 5 insertions(+) diff --git a/dune-project b/dune-project index 09d6dc29894..fc74adc8a6e 100644 --- a/dune-project +++ b/dune-project @@ -300,6 +300,10 @@ xenctrl xmlm yojson + ; can't use '= version' here yet, + ; 'xapi-tools' will have version ~dev, not 'master' like all the others + ; because it is not in xs-opam yet + rrd-transport (alcotest :with-test) (ppx_deriving_rpc :with-test) (qcheck-core :with-test) (xapi-test-utils :with-test) (xenstore_transport :with-test) ) ) diff --git a/xapi-tools.opam b/xapi-tools.opam index 62a7bba7c8c..ba0f73b479f 100644 --- a/xapi-tools.opam +++ b/xapi-tools.opam @@ -27,6 +27,7 @@ depends: [ "xenctrl" "xmlm" "yojson" + "rrd-transport" "alcotest" {with-test} "ppx_deriving_rpc" {with-test} "qcheck-core" {with-test} "xapi-test-utils" {with-test} "xenstore_transport" {with-test} From 773b1bb35c1254748b973030c3a74ffcd2539f31 Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 10 Sep 2024 12:44:29 +0100 Subject: [PATCH 028/141] CP-51034: Add cache-related fields to pool object Adds 3 new fields to the pool object: - ext_auth_cache_enabled: bool Specifies whether or not external authentication caching is enabled for this pool. The field is false by default. - ext_auth_cache_size: int Specifies the maximum capacity of the external authentication cache. Its default value is 50. - ext_auth_cache_expiry: int Specifies how long cached entries should remain cached. The default value is 300 seconds (5 minutes). Previously, these options were configured from xapi.conf, but in the case of designation of a new pool coordinator (by autonomous means or otherwise), we would like the feature's configuration to be adopted by the new coordinator (without necessitating duplicate xapi configurations across hosts in the pool). The cache's contents themselves will not be preserved, but caching will resume on the new coordinator if it was enabled on the previous coordinator.
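For context only, a minimal sketch of how the new fields might be driven through the generated OCaml client bindings; the wrapper function, the rpc/session arguments and the concrete values are illustrative assumptions, not part of this patch:

```
(* Hypothetical helper, assuming an rpc function and a logged-in session;
   the values chosen below are examples only. *)
let configure_ext_auth_cache ~rpc ~session_id ~pool =
  (* Enable external-auth caching for the pool (disabled by default). *)
  Client.Pool.set_ext_auth_cache_enabled ~rpc ~session_id ~self:pool ~value:true ;
  (* Allow up to 100 cached entries (the default capacity is 50). *)
  Client.Pool.set_ext_auth_cache_size ~rpc ~session_id ~self:pool ~value:100L ;
  (* Keep entries for 10 minutes (the default expiry is 300 seconds). *)
  Client.Pool.set_ext_auth_cache_expiry ~rpc ~session_id ~self:pool ~value:600L
```

Setting ext_auth_cache_enabled back to false also clears any entries already cached on the coordinator.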
Signed-off-by: Colin James --- ocaml/idl/datamodel_common.ml | 2 +- ocaml/idl/datamodel_pool.ml | 49 +++++++++++++++++++++++++++++++ ocaml/idl/schematest.ml | 2 +- ocaml/tests/common/test_common.ml | 6 ++-- ocaml/xapi/dbsync_master.ml | 3 +- ocaml/xapi/message_forwarding.ml | 18 ++++++++++++ ocaml/xapi/xapi_pool.ml | 23 +++++++++++++++ ocaml/xapi/xapi_pool.mli | 9 ++++++ 8 files changed, 107 insertions(+), 5 deletions(-) diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index e66ab3eff93..45b9d3068e6 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -10,7 +10,7 @@ open Datamodel_roles to leave a gap for potential hotfixes needing to increment the schema version.*) let schema_major_vsn = 5 -let schema_minor_vsn = 781 +let schema_minor_vsn = 782 (* Historical schema versions just in case this is useful later *) let rio_schema_major_vsn = 5 diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml index cdc830add08..198c8b5a83a 100644 --- a/ocaml/idl/datamodel_pool.ml +++ b/ocaml/idl/datamodel_pool.ml @@ -1136,6 +1136,40 @@ let set_ext_auth_max_threads = ~params:[(Ref _pool, "self", "The pool"); (Int, "value", "The new maximum")] ~allowed_roles:_R_POOL_OP () +let set_ext_auth_cache_enabled = + call ~name:"set_ext_auth_cache_enabled" ~lifecycle:[] + ~params: + [ + (Ref _pool, "self", "The pool") + ; ( Bool + , "value" + , "Specifies whether caching is enabled for external authentication" + ) + ] + ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () + +let set_ext_auth_cache_size = + call ~name:"set_ext_auth_cache_size" ~lifecycle:[] + ~params: + [ + (Ref _pool, "self", "The pool") + ; (Int, "value", "The capacity of the external authentication cache") + ] + ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () + +let set_ext_auth_cache_expiry = + call ~name:"set_ext_auth_cache_expiry" ~lifecycle:[] + ~params: + [ + (Ref _pool, "self", "The pool") + ; ( Int + , "value" + , "The expiry time of entries in the external authentication cache (in \ + seconds - 300 seconds, i.e. 
5 minutes, is the default value)" + ) + ] + ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () + let pool_guest_secureboot_readiness = Enum ( "pool_guest_secureboot_readiness" @@ -1245,6 +1279,9 @@ let t = ; set_update_sync_enabled ; set_local_auth_max_threads ; set_ext_auth_max_threads + ; set_ext_auth_cache_enabled + ; set_ext_auth_cache_size + ; set_ext_auth_cache_expiry ; get_guest_secureboot_readiness ] ~contents: @@ -1488,6 +1525,18 @@ let t = ; field ~qualifier:StaticRO ~ty:Int ~default_value:(Some (VInt 1L)) ~lifecycle:[] "ext_auth_max_threads" "Maximum number of threads to use for external (AD) authentication" + ; field ~qualifier:DynamicRO ~ty:Bool + ~default_value:(Some (VBool false)) ~lifecycle:[] + "ext_auth_cache_enabled" + "Specifies whether external authentication caching is enabled for \ + this pool or not" + ; field ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 50L)) + ~lifecycle:[] "ext_auth_cache_size" + "Maximum capacity of external authentication cache" + ; field ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 300L)) + ~lifecycle:[] "ext_auth_cache_expiry" + "Specifies how long external authentication entries should be \ + cached for (seconds)" ; field ~lifecycle:[] ~qualifier:DynamicRO ~ty:(Ref _secret) ~default_value:(Some (VRef null_ref)) "telemetry_uuid" "The UUID of the pool for identification of telemetry data" diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml index 0afe0a10be1..611dc17f605 100644 --- a/ocaml/idl/schematest.ml +++ b/ocaml/idl/schematest.ml @@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex (* BEWARE: if this changes, check that schema has been bumped accordingly in ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *) -let last_known_schema_hash = "60590fa3fa2f8af66d9bf3c50b7bacc2" +let last_known_schema_hash = "5f1637f4ddfaa2a0dfb6cfc318451855" let current_schema_hash : string = let open Datamodel_types in diff --git a/ocaml/tests/common/test_common.ml b/ocaml/tests/common/test_common.ml index 293317518a4..c327914b0f9 100644 --- a/ocaml/tests/common/test_common.ml +++ b/ocaml/tests/common/test_common.ml @@ -316,8 +316,10 @@ let make_pool ~__context ~master ?(name_label = "") ?(name_description = "") ~repository_proxy_url ~repository_proxy_username ~repository_proxy_password ~migration_compression ~coordinator_bias ~telemetry_uuid ~telemetry_frequency ~telemetry_next_collection ~last_update_sync - ~local_auth_max_threads:8L ~ext_auth_max_threads:8L ~update_sync_frequency - ~update_sync_day ~update_sync_enabled ~recommendations ; + ~local_auth_max_threads:8L ~ext_auth_max_threads:8L + ~ext_auth_cache_enabled:false ~ext_auth_cache_size:50L + ~ext_auth_cache_expiry:300L ~update_sync_frequency ~update_sync_day + ~update_sync_enabled ~recommendations ; pool_ref let default_sm_features = diff --git a/ocaml/xapi/dbsync_master.ml b/ocaml/xapi/dbsync_master.ml index 31f235e7214..aad7434dc02 100644 --- a/ocaml/xapi/dbsync_master.ml +++ b/ocaml/xapi/dbsync_master.ml @@ -54,7 +54,8 @@ let create_pool_record ~__context = ~last_update_sync:Xapi_stdext_date.Date.epoch ~update_sync_frequency:`weekly ~update_sync_day:0L ~update_sync_enabled:false ~local_auth_max_threads:8L - ~ext_auth_max_threads:1L ~recommendations:[] + ~ext_auth_max_threads:1L ~ext_auth_cache_enabled:false + ~ext_auth_cache_size:50L ~ext_auth_cache_expiry:300L ~recommendations:[] let set_master_ip ~__context = let ip = diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index 83d4ff26e24..cbbbdb1f078 100644 --- 
a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -1166,6 +1166,24 @@ functor value ; Local.Pool.set_ext_auth_max_threads ~__context ~self ~value + let set_ext_auth_cache_enabled ~__context ~self ~value = + info "%s: pool='%s' value='%b'" __FUNCTION__ + (pool_uuid ~__context self) + value ; + Local.Pool.set_ext_auth_cache_enabled ~__context ~self ~value + + let set_ext_auth_cache_size ~__context ~self ~value = + info "%s: pool='%s' value='%Ld'" __FUNCTION__ + (pool_uuid ~__context self) + value ; + Local.Pool.set_ext_auth_cache_size ~__context ~self ~value + + let set_ext_auth_cache_expiry ~__context ~self ~value = + info "%s: pool='%s' value='%Ld'" __FUNCTION__ + (pool_uuid ~__context self) + value ; + Local.Pool.set_ext_auth_cache_expiry ~__context ~self ~value + let get_guest_secureboot_readiness ~__context ~self = info "%s: pool='%s'" __FUNCTION__ (pool_uuid ~__context self) ; Local.Pool.get_guest_secureboot_readiness ~__context ~self diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 1b2f4c08a75..39b5dbd447b 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -3781,6 +3781,29 @@ let set_local_auth_max_threads ~__context:_ ~self:_ ~value = let set_ext_auth_max_threads ~__context:_ ~self:_ ~value = Xapi_session.set_ext_auth_max_threads value +let set_ext_auth_cache_enabled ~__context ~self ~value:enabled = + Db.Pool.set_ext_auth_cache_enabled ~__context ~self ~value:enabled ; + if not enabled then + Xapi_session.clear_external_auth_cache () + +let set_ext_auth_cache_size ~__context ~self ~value:capacity = + if capacity < 0L then + raise + Api_errors.( + Server_error (invalid_value, ["size"; Int64.to_string capacity]) + ) + else + Db.Pool.set_ext_auth_cache_size ~__context ~self ~value:capacity + +let set_ext_auth_cache_expiry ~__context ~self ~value:expiry_seconds = + if expiry_seconds <= 0L then + raise + Api_errors.( + Server_error (invalid_value, ["expiry"; Int64.to_string expiry_seconds]) + ) + else + Db.Pool.set_ext_auth_cache_expiry ~__context ~self ~value:expiry_seconds + let get_guest_secureboot_readiness ~__context ~self:_ = let auth_files = Sys.readdir !Xapi_globs.varstore_dir in let pk_present = Array.mem "PK.auth" auth_files in diff --git a/ocaml/xapi/xapi_pool.mli b/ocaml/xapi/xapi_pool.mli index 9e74ea3f373..0bd71a22996 100644 --- a/ocaml/xapi/xapi_pool.mli +++ b/ocaml/xapi/xapi_pool.mli @@ -418,6 +418,15 @@ val set_local_auth_max_threads : val set_ext_auth_max_threads : __context:Context.t -> self:API.ref_pool -> value:int64 -> unit +val set_ext_auth_cache_enabled : + __context:Context.t -> self:API.ref_pool -> value:bool -> unit + +val set_ext_auth_cache_size : + __context:Context.t -> self:API.ref_pool -> value:int64 -> unit + +val set_ext_auth_cache_expiry : + __context:Context.t -> self:API.ref_pool -> value:int64 -> unit + val get_guest_secureboot_readiness : __context:Context.t -> self:API.ref_pool From ce7e543d3107fe5d126bd0e31c9722ac10947a88 Mon Sep 17 00:00:00 2001 From: Colin James Date: Thu, 19 Sep 2024 10:38:59 +0100 Subject: [PATCH 029/141] CP-51034: Expose cache options to xe CLI Signed-off-by: Colin James --- ocaml/xapi-cli-server/records.ml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ocaml/xapi-cli-server/records.ml b/ocaml/xapi-cli-server/records.ml index 3798280d082..cd7e2f5ae80 100644 --- a/ocaml/xapi-cli-server/records.ml +++ b/ocaml/xapi-cli-server/records.ml @@ -1469,6 +1469,31 @@ let pool_record rpc session_id pool = ~get:(fun () -> get_from_map (x 
()).API.pool_recommendations) ~get_map:(fun () -> (x ()).API.pool_recommendations) () + ; make_field ~name:"ext-auth-cache-enabled" ~hidden:true + ~get:(fun () -> + (x ()).API.pool_ext_auth_cache_enabled |> string_of_bool + ) + ~set:(fun v -> + Client.Pool.set_ext_auth_cache_enabled ~rpc ~session_id ~self:pool + ~value:(bool_of_string v) + ) + () + ; make_field ~name:"ext-auth-cache-size" ~hidden:true + ~get:(fun () -> (x ()).API.pool_ext_auth_cache_size |> Int64.to_string) + ~set:(fun v -> + Client.Pool.set_ext_auth_cache_size ~rpc ~session_id ~self:pool + ~value:(Int64.of_string v) + ) + () + ; make_field ~name:"ext-auth-cache-expiry" ~hidden:true + ~get:(fun () -> + (x ()).API.pool_ext_auth_cache_expiry |> Int64.to_string + ) + ~set:(fun v -> + Client.Pool.set_ext_auth_cache_expiry ~rpc ~session_id ~self:pool + ~value:(Int64.of_string v) + ) + () ] } From 68744c2ec03dde97da3d198ec8339cfbf269c3e6 Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 10 Sep 2024 13:14:10 +0100 Subject: [PATCH 030/141] CP-50134: Replace Xapi_globs usage w/ pool config Replaces the usages of Xapi_globs within external authentication cache-related code to query the pool object for its configuration options. Signed-off-by: Colin James --- ocaml/xapi/xapi_session.ml | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 72a0ff7c705..fd35e6a7c76 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -791,11 +791,17 @@ module Caching = struct disabled. This function exists to delay the construction of the cache, as Xapi_globs configuration is not guaranteed to have been populated before the top-level code of this module is executed. *) - let get_or_init_cache () = - if not !Xapi_globs.external_authentication_cache_enabled then + let get_or_init_cache ~__context = + let pool = Helpers.get_pool ~__context in + let cache_enabled = + Db.Pool.get_ext_auth_cache_enabled ~__context ~self:pool + in + if not cache_enabled then None else - let capacity = !Xapi_globs.external_authentication_cache_size in + let capacity = + Db.Pool.get_ext_auth_cache_size ~__context ~self:pool |> Int64.to_int + in let@ () = with_lock lock in match !cache with | Some _ as extant -> @@ -808,8 +814,8 @@ module Caching = struct (* Try to insert into cache. The cache could have been disabled during query to external authentication plugin. *) - let insert_into_cache username password result = - match get_or_init_cache () with + let insert_into_cache ~__context username password result = + match get_or_init_cache ~__context with | None -> () | Some cache -> @@ -818,13 +824,13 @@ module Caching = struct (* Consult the cache or rely on a provided "slow path". Each time the slow path is invoked, an attempt is made to cache its result. 
*) - let memoize username password ~slow_path = + let memoize ~__context username password ~slow_path = let slow_path () = let ext_auth_result = slow_path () in - insert_into_cache username password ext_auth_result ; + insert_into_cache ~__context username password ext_auth_result ; ext_auth_result in - match get_or_init_cache () with + match get_or_init_cache ~__context with | None -> slow_path () | Some cache -> ( @@ -1206,7 +1212,8 @@ let login_with_password ~__context ~uname ~pwd ~version:_ ~originator = ; subject_name ; rbac_permissions } = - Caching.memoize uname pwd ~slow_path:query_external_auth + Caching.memoize ~__context uname pwd + ~slow_path:query_external_auth in login_no_password_common ~__context ~uname:(Some uname) ~originator From 19c6371ad7f9e8c9b08e88546212d0247fbdba1c Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 10 Sep 2024 13:35:55 +0100 Subject: [PATCH 031/141] CP-51034: Inject expiry instead of using global Modifies external authentication cache-related code to no longer rely on a globally-specified expiry time (TTL). Instead, caches are constructed with a given expiry time that they will endow entries with for the extent of their lifetime. There are now no usages of Xapi_globs in cache-related code, so those configuration options can be removed. Signed-off-by: Colin James --- ocaml/tests/test_auth_cache.ml | 18 +++++++----------- ocaml/xapi/helpers.ml | 20 ++++++++++++++++---- ocaml/xapi/xapi_session.ml | 19 ++++++++++++------- 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/ocaml/tests/test_auth_cache.ml b/ocaml/tests/test_auth_cache.ml index b273248eb41..571a4de0da5 100644 --- a/ocaml/tests/test_auth_cache.ml +++ b/ocaml/tests/test_auth_cache.ml @@ -79,7 +79,7 @@ let credentials = let test_cache_similar_passwords () = let user = "user" in let password = "passwordpasswordpassword" in - let cache = Cache.create ~size:1 in + let cache = Cache.create ~size:1 ~ttl:Mtime.Span.(10 * s) in insert cache (user, password, "session") ; for len = String.length password - 1 downto 0 do let password' = String.sub password 0 len in @@ -92,8 +92,8 @@ let test_cache_similar_passwords () = expiration time. *) let test_cache_expiration () = let expiry_seconds = 2 in - (Xapi_globs.external_authentication_expiry := Mtime.Span.(expiry_seconds * s)) ; - let cache = Cache.create ~size:100 in + let ttl = Mtime.Span.(expiry_seconds * s) in + let cache = Cache.create ~size:100 ~ttl in (* Cache all the credentials. *) CS.iter (insert cache) credentials ; (* Immediately check that all the values are cached. *) @@ -112,17 +112,13 @@ let test_cache_expiration () = of cached entries. *) let test_cache_updates_duplicates () = let expiry_seconds = 1 in - (Xapi_globs.external_authentication_expiry := Mtime.Span.(expiry_seconds * s)) ; + let ttl = Mtime.Span.(expiry_seconds * s) in let count = CS.cardinal credentials in - let cache = Cache.create ~size:count in + let cache = Cache.create ~size:count ~ttl in let credentials = CS.to_seq credentials in Seq.iter (insert cache) credentials ; let is_even i = i mod 2 = 0 in (* Elements occurring at even indices will have their TTLs extended. *) - (Xapi_globs.external_authentication_expiry := - let expiry_seconds' = 30 * expiry_seconds in - Mtime.Span.(expiry_seconds' * s) - ) ; Seq.iteri (fun i c -> if is_even i then insert cache c) credentials ; (* Delay for at least as long as the original TTL. 
*) Thread.delay (float_of_int expiry_seconds) ; @@ -144,9 +140,9 @@ let test_cache_updates_duplicates () = By the end, the cache must have iteratively evicted each previous entry and should only contain elements of c'_1, c'_2, ..., c'_N. *) let test_cache_eviction () = - (Xapi_globs.external_authentication_expiry := Mtime.Span.(30 * s)) ; + let ttl = Mtime.Span.(30 * s) in let count = CS.cardinal credentials in - let cache = Cache.create ~size:count in + let cache = Cache.create ~size:count ~ttl in CS.iter (insert cache) credentials ; (* Augment each of the credentials *) let change = ( ^ ) "_different_" in diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml index ab39410bb91..0a32a8af1d3 100644 --- a/ocaml/xapi/helpers.ml +++ b/ocaml/xapi/helpers.ml @@ -2262,7 +2262,7 @@ module AuthenticationCache = struct type session - val create : size:int -> t + val create : size:int -> ttl:Mtime.span -> t val cache : t -> user -> password -> session -> unit @@ -2282,13 +2282,25 @@ module AuthenticationCache = struct type session = Secret.secret - type t = {cache: Q.t; mutex: Mutex.t; elapsed: Mtime_clock.counter} + type t = { + cache: Q.t + ; mutex: Mutex.t + ; elapsed: Mtime_clock.counter + (* Counter that can be queried to + find out how much time has elapsed since the cache's + construction. This is used as a reference point when creating and + comparing expiration spans on cache entries. *) + ; ttl: Mtime.span + (* Time-to-live associated with each cached entry. Once + this time elapses, the entry is invalidated.*) + } - let create ~size = + let create ~size ~ttl = { cache= Q.create ~capacity:size ; mutex= Mutex.create () ; elapsed= Mtime_clock.counter () + ; ttl } let with_lock m f = @@ -2304,7 +2316,7 @@ module AuthenticationCache = struct let@ () = with_lock t.mutex in let expires = let elapsed = Mtime_clock.count t.elapsed in - let timeout = !Xapi_globs.external_authentication_expiry in + let timeout = t.ttl in Mtime.Span.add elapsed timeout in let salt = Secret.create_salt () in diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index fd35e6a7c76..050ae35ceef 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -788,9 +788,7 @@ module Caching = struct let ( let@ ) = ( @@ ) (* Attain the extant cache or get nothing if caching is - disabled. This function exists to delay the construction of the - cache, as Xapi_globs configuration is not guaranteed to have been - populated before the top-level code of this module is executed. *) + disabled. 
*) let get_or_init_cache ~__context = let pool = Helpers.get_pool ~__context in let cache_enabled = @@ -799,15 +797,22 @@ module Caching = struct if not cache_enabled then None else - let capacity = - Db.Pool.get_ext_auth_cache_size ~__context ~self:pool |> Int64.to_int - in let@ () = with_lock lock in match !cache with | Some _ as extant -> extant | _ -> - let auth_cache = AuthenticationCache.create ~size:capacity in + let capacity = + Db.Pool.get_ext_auth_cache_size ~__context ~self:pool + |> Int64.to_int + in + let ttl = + Db.Pool.get_ext_auth_cache_expiry ~__context ~self:pool + |> Int64.unsigned_to_int + |> Option.map (fun sec -> Mtime.Span.(sec * s)) + |> Option.value ~default:Mtime.Span.(5 * min) + in + let auth_cache = AuthenticationCache.create ~size:capacity ~ttl in let instance = Some auth_cache in cache := instance ; instance From 1c00a73bf77e605197e588bdc38688d8dd97f641 Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 10 Sep 2024 13:58:46 +0100 Subject: [PATCH 032/141] CP-51034: Remove cache options from Xapi_globs Removes the global options that were once used to configure the external authentication cache. This is safe to do as the cache now configures itself from the pool object. Adds a few lines of debug messages. Signed-off-by: Colin James --- ocaml/xapi/xapi_globs.ml | 30 +----------------------------- ocaml/xapi/xapi_session.ml | 4 ++++ 2 files changed, 5 insertions(+), 29 deletions(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index cbaa7430e88..d23d7ec4ce6 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1040,12 +1040,6 @@ let cert_thumbprint_header_value_sha1 = ref "sha-1:master" let cert_thumbprint_header_response = ref "x-xenapi-response-host-certificate-thumbprint" -let external_authentication_expiry = ref Mtime.Span.(5 * min) - -let external_authentication_cache_enabled = ref false - -let external_authentication_cache_size = ref 50 - let observer_endpoint_http_enabled = ref false let observer_endpoint_https_enabled = ref false @@ -1149,14 +1143,7 @@ let xapi_globs_spec = ; ("test-open", Int test_open) (* for consistency with xenopsd *) ] -let xapi_globs_spec_with_descriptions = - [ - ( "external-authentication-expiry" - , ShortDurationFromSeconds external_authentication_expiry - , "Specify how long externally authenticated login decisions should be \ - cached (in seconds)" - ) - ] +let xapi_globs_spec_with_descriptions = [] let option_of_xapi_globs_spec ?(description = None) (name, ty) = let spec = @@ -1625,21 +1612,6 @@ let other_options = , (fun () -> string_of_bool !disable_webserver) , "Disable the host webserver" ) - ; ( "enable-external-authentication-cache" - , Arg.Set external_authentication_cache_enabled - , (fun () -> string_of_bool !external_authentication_cache_enabled) - , "Enable caching of external authentication decisions" - ) - ; ( "external-authentication-cache-size" - , Arg.Int (fun sz -> external_authentication_cache_size := sz) - , (fun () -> string_of_int !external_authentication_cache_size) - , "Specify the maximum capacity of the external authentication cache" - ) - ; ( "threshold_last_active" - , Arg.Int (fun t -> threshold_last_active := Ptime.Span.of_int_s t) - , (fun () -> Format.asprintf "%a" Ptime.Span.pp !threshold_last_active) - , "Specify the threshold below which we do not refresh the session" - ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. 
diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 050ae35ceef..4def022bfcc 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -812,6 +812,9 @@ module Caching = struct |> Option.map (fun sec -> Mtime.Span.(sec * s)) |> Option.value ~default:Mtime.Span.(5 * min) in + let span = Format.asprintf "%a" Mtime.Span.pp ttl in + info "Creating authentication cache of capacity %d and TTL of %s" + capacity span ; let auth_cache = AuthenticationCache.create ~size:capacity ~ttl in let instance = Some auth_cache in cache := instance ; @@ -851,6 +854,7 @@ module Caching = struct ) let clear_cache () = + info "Clearing authentication cache" ; let@ () = with_lock lock in cache := None end From 4fcb2f3c8e82fcca1501d663e4e5d662390791f8 Mon Sep 17 00:00:00 2001 From: Benjamin Reis Date: Fri, 20 Sep 2024 08:26:40 +0200 Subject: [PATCH 033/141] Determine IP address type from inventory when creating a PIF Do not assume IPv4; instead, use what is in the inventory's `MANAGEMENT_ADDRESS_TYPE`. Signed-off-by: Benjamin Reis --- ocaml/xapi/xapi_pif.ml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index 56dff779240..3df1d692b39 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -475,6 +475,10 @@ let introduce_internal ?network ?(physical = true) ~t:_ ~__context ~host ~mAC let capabilities = Net.Interface.get_capabilities dbg device in let pci = get_device_pci ~__context ~host ~device in let pif = Ref.make () in + let primary_address_type = + Record_util.primary_address_type_of_string + (Xapi_inventory.lookup Xapi_inventory._management_address_type) + in debug "Creating a new record for NIC: %s: %s" device (Ref.string_of pif) ; let () = Db.PIF.create ~__context ~ref:pif @@ -485,7 +489,7 @@ let introduce_internal ?network ?(physical = true) ~t:_ ~__context ~host ~mAC ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null ~vLAN_master_of ~management:false ~other_config:[] ~disallow_unplug ~ipv6_configuration_mode:`None ~iPv6:[] ~ipv6_gateway:"" - ~primary_address_type:`IPv4 ~managed ~properties:default_properties + ~primary_address_type ~managed ~properties:default_properties ~capabilities ~pCI:pci in (* If I'm a pool slave and this pif represents my management From 92b683260beb687d331562ff95c40adf08bb259a Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Fri, 20 Sep 2024 08:22:30 +0100 Subject: [PATCH 034/141] idl: Make in_product_since default value explicit and unnecessary with lifecycle provided in_product_since was always given a 'Some value' for fields, meaning that the exception for a missing lifecycle parameter would never be triggered. Require specifying either a lifecycle (for newer fields) or in_product_since (for older ones), but not both. No functional change; all parameters have been made explicit since otherwise this commit would fail to build. 
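For illustration only (not part of the patch): a minimal, hypothetical sketch of the two accepted forms, written against the existing datamodel DSL (field, rel_rio, rel_orlando, Published); the field names below are invented.

```
(* Hypothetical examples -- not taken from the datamodel. *)

(* Older field: its age is recorded solely via ~in_product_since. *)
let _example_old =
  field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "example_old"
    "a field that has existed since Rio"

(* Newer field: its history is recorded solely via an explicit ~lifecycle. *)
let _example_new =
  field ~qualifier:DynamicRO ~ty:Int
    ~lifecycle:[(Published, rel_orlando, "")]
    "example_new" "a field introduced in Orlando"

(* Passing both ~lifecycle and ~in_product_since to [field] is no longer
   accepted. *)
```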
Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 556 +++++++++++++++++++--------------- ocaml/idl/datamodel_common.ml | 29 +- ocaml/idl/datamodel_host.ml | 82 ++--- ocaml/idl/datamodel_pool.ml | 30 +- ocaml/idl/datamodel_vm.ml | 124 ++++---- ocaml/idl/datamodel_vtpm.ml | 3 +- 6 files changed, 473 insertions(+), 351 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 737ecc53b0f..d2471e720f2 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -195,13 +195,14 @@ module Session = struct ~contents: [ uid _session - ; field ~qualifier:DynamicRO ~ty:(Ref _host) "this_host" - "Currently connected host" - ; field ~qualifier:DynamicRO ~ty:(Ref _user) "this_user" - "Currently connected user" - ; field ~qualifier:DynamicRO ~ty:DateTime "last_active" - "Timestamp for last time session was active" - ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None "pool" + ; field ~qualifier:DynamicRO ~ty:(Ref _host) ~in_product_since:rel_rio + "this_host" "Currently connected host" + ; field ~qualifier:DynamicRO ~ty:(Ref _user) ~in_product_since:rel_rio + "this_user" "Currently connected user" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_active" "Timestamp for last time session was active" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None + ~in_product_since:rel_rio "pool" "True if this session relates to a intra-pool login, false \ otherwise" ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) @@ -384,46 +385,53 @@ module Task = struct ] @ allowed_and_current_operations task_allowed_operations @ [ - field ~qualifier:DynamicRO ~ty:DateTime "created" - "Time task was created" - ; field ~qualifier:DynamicRO ~ty:DateTime "finished" + field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "created" "Time task was created" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "finished" "Time task finished (i.e. succeeded or failed). If task-status \ is pending, then the value of this field has no meaning" - ; field ~qualifier:DynamicRO ~ty:status_type "status" - "current status of the task" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:(Ref _session) "session" "the session that created the task" - ; field ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" - "the host on which the task is running" - ; field ~qualifier:DynamicRO ~ty:Float "progress" + ; field ~qualifier:DynamicRO ~ty:status_type ~in_product_since:rel_rio + "status" "current status of the task" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _session) + "session" "the session that created the task" + ; field ~qualifier:DynamicRO ~ty:(Ref _host) ~in_product_since:rel_rio + "resident_on" "the host on which the task is running" + ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio + "progress" "This field contains the estimated fraction of the task which is \ complete. This field should not be used to determine whether \ the task is complete - for this the status field of the task \ should be used." - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:Int "externalpid" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_only:true ~qualifier:DynamicRO ~ty:Int "externalpid" "If the task has spawned a program, the field record the PID of \ the process that the task is waiting on. 
(-1 if no waiting \ completion of an external program )" - ; field ~in_oss_since:None ~internal_deprecated_since:rel_boston - ~internal_only:true ~qualifier:DynamicRO ~ty:Int "stunnelpid" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_deprecated_since:rel_boston ~internal_only:true + ~qualifier:DynamicRO ~ty:Int "stunnelpid" "If the task has been forwarded, this field records the pid of \ the stunnel process spawned to manage the forwarding connection" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:Bool "forwarded" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "forwarded" "True if this task has been forwarded to a slave" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:(Ref _host) "forwarded_to" - "The host to which the task has been forwarded" - ; field ~qualifier:DynamicRO ~ty:String "type" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _host) + "forwarded_to" "The host to which the task has been forwarded" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "type" "if the task has completed successfully, this field contains the \ type of the encoded result (i.e. name of the class whose \ reference is in the result field). Undefined otherwise." - ; field ~qualifier:DynamicRO ~ty:String "result" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "result" "if the task has completed successfully, this field contains the \ result value (either Void or an object reference). Undefined \ otherwise." - ; field ~qualifier:DynamicRO ~ty:(Set String) "error_info" + ; field ~qualifier:DynamicRO ~ty:(Set String) + ~in_product_since:rel_rio "error_info" "if the task has failed, this field contains the set of \ associated error strings. Undefined otherwise." ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) @@ -492,8 +500,9 @@ module User = struct ~contents: [ uid _user - ; field ~qualifier:StaticRO "short_name" "short name (e.g. userid)" - ; field "fullname" "full name" + ; field ~qualifier:StaticRO ~in_product_since:rel_rio "short_name" + "short name (e.g. 
userid)" + ; field ~in_product_since:rel_rio "fullname" "full name" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -535,14 +544,16 @@ module Host_crashdump = struct ~contents: [ uid ~in_oss_since:None _host_crashdump - ; field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _host) "host" - "Host the crashdump relates to" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime "timestamp" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO + ~ty:(Ref _host) "host" "Host the crashdump relates to" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:DateTime "timestamp" "Time the crash happened" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" - "Size of the crashdump" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:Int "size" "Size of the crashdump" ; field ~qualifier:StaticRO ~ty:String ~in_oss_since:None - ~internal_only:true "filename" "filename of crash dir" + ~in_product_since:rel_rio ~internal_only:true "filename" + "filename of crash dir" ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -711,8 +722,8 @@ module Pool_update = struct ~in_oss_since:None ~qualifier:StaticRO ~ty:(Set after_apply_guidance) "after_apply_guidance" "What the client should do after this update has been applied." - ; field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _vdi) "vdi" - "VDI the update was uploaded to" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO + ~ty:(Ref _vdi) "vdi" "VDI the update was uploaded to" ; field ~in_product_since:rel_ely ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host)) "hosts" "The hosts that have applied this update." 
@@ -892,18 +903,20 @@ module Host_patch = struct [ uid ~in_oss_since:None _host_patch ; namespace ~name:"name" ~contents:(names None StaticRO) () - ; field ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" - "Patch version number" - ; field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _host) "host" - "Host the patch relates to" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:String "filename" "Filename of the patch" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "applied" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO + ~ty:String "version" "Patch version number" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO + ~ty:(Ref _host) "host" "Host the patch relates to" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~internal_only:true + ~qualifier:DynamicRO ~ty:String "filename" "Filename of the patch" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:Bool "applied" "True if the patch has been applied" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime - "timestamp_applied" "Time the patch was applied" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" - "Size of the patch" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:DateTime "timestamp_applied" + "Time the patch was applied" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch" ; field ~in_product_since:rel_miami ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _pool_patch) ~default_value:(Some (VRef "")) "pool_patch" "The patch applied" @@ -919,8 +932,8 @@ module Host_metrics = struct let host_metrics_memory = let field = field ~ty:Int in [ - field ~qualifier:DynamicRO "total" "Total host memory (bytes)" - ~doc_tags:[Memory] + field ~qualifier:DynamicRO ~in_product_since:rel_rio "total" + "Total host memory (bytes)" ~doc_tags:[Memory] ; field "free" "Free host memory (bytes)" ~default_value:(Some (VInt 0L)) ~lifecycle: [ @@ -942,9 +955,9 @@ module Host_metrics = struct uid _host_metrics ; namespace ~name:"memory" ~contents:host_metrics_memory () ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None "live" - "Pool master thinks this host is live" - ; field ~qualifier:DynamicRO ~ty:DateTime "last_updated" - "Time at which this information was last updated" + ~in_product_since:rel_rio "Pool master thinks this host is live" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_updated" "Time at which this information was last updated" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -971,28 +984,30 @@ module Host_cpu = struct ~contents: [ uid _hostcpu - ; field ~qualifier:DynamicRO ~ty:(Ref _host) "host" - "the host the CPU is in" - ; field ~qualifier:DynamicRO ~ty:Int "number" + ; field ~qualifier:DynamicRO ~ty:(Ref _host) ~in_product_since:rel_rio + "host" "the host the CPU is in" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "number" "the number of the physical CPU within the host" - ; field ~qualifier:DynamicRO ~ty:String "vendor" - "the vendor of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:Int "speed" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "vendor" "the vendor of the physical CPU" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "speed" "the speed of the physical CPU" - ; field 
~qualifier:DynamicRO ~ty:String "modelname" - "the model name of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:Int "family" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "modelname" "the model name of the physical CPU" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "family" "the family (number) of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:Int "model" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "model" "the model number of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:String "stepping" - "the stepping of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:String "flags" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "stepping" "the stepping of the physical CPU" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "flags" "the flags of the physical CPU (a decoded version of the features \ field)" - ; field ~qualifier:DynamicRO ~ty:String "features" - "the physical CPU feature bitmap" - ; field ~qualifier:DynamicRO ~persist:false ~ty:Float "utilisation" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "features" "the physical CPU feature bitmap" + ; field ~qualifier:DynamicRO ~persist:false ~ty:Float + ~in_product_since:rel_rio "utilisation" "the current CPU utilisation" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) @@ -1004,11 +1019,13 @@ end (** Disk and network interfaces are associated with QoS parameters: *) let qos devtype = [ - field "algorithm_type" "QoS algorithm to use" + field ~in_product_since:rel_rio "algorithm_type" "QoS algorithm to use" ; field ~ty:(Map (String, String)) - "algorithm_params" "parameters for chosen QoS algorithm" - ; field ~qualifier:DynamicRO ~ty:(Set String) "supported_algorithms" + ~in_product_since:rel_rio "algorithm_params" + "parameters for chosen QoS algorithm" + ; field ~qualifier:DynamicRO ~ty:(Set String) ~in_product_since:rel_rio + "supported_algorithms" ("supported QoS algorithms for this " ^ devtype) ] @@ -1255,10 +1272,10 @@ module Network = struct ] @ allowed_and_current_operations ~writer_roles:_R_POOL_OP operations @ [ - field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) "VIFs" - "list of connected vifs" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) "PIFs" - "list of connected pifs" + field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) + ~in_product_since:rel_rio "VIFs" "list of connected vifs" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) + ~in_product_since:rel_rio "PIFs" "list of connected pifs" ; field ~qualifier:RW ~ty:Int ~default_value:(Some (VInt 1500L)) ~in_product_since:rel_midnight_ride "MTU" "MTU in octets" ; field ~writer_roles:_R_POOL_OP @@ -1270,6 +1287,7 @@ module Network = struct ; ("XenCenter.CustomFields.*", _R_VM_OP) ; ("XenCenterCreateInProgress", _R_VM_OP) ] + ~in_product_since:rel_rio ; field ~lifecycle: [ @@ -1786,23 +1804,25 @@ module PIF = struct [ uid _pif ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO "device" + field ~qualifier:StaticRO ~in_product_since:rel_rio "device" "machine-readable name of the interface (e.g. 
eth0)" - ; field ~qualifier:StaticRO ~ty:(Ref _network) "network" - "virtual network to which this pif is connected" - ; field ~qualifier:StaticRO ~ty:(Ref _host) "host" - "physical machine to which this pif is connected" + ; field ~qualifier:StaticRO ~ty:(Ref _network) ~in_product_since:rel_rio + "network" "virtual network to which this pif is connected" + ; field ~qualifier:StaticRO ~ty:(Ref _host) ~in_product_since:rel_rio + "host" "physical machine to which this pif is connected" ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO "MAC" + field ~qualifier:StaticRO ~in_product_since:rel_rio "MAC" "ethernet MAC address of physical interface" ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~ty:Int "MTU" "MTU in octets" + field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio "MTU" + "MTU in octets" ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~ty:Int "VLAN" + field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio "VLAN" "VLAN tag for all traffic passing through this interface" - ; field ~in_oss_since:None ~internal_only:true "device_name" - "actual dom0 device name" - ; field ~qualifier:DynamicRO ~ty:(Ref _pif_metrics) "metrics" + ; field ~in_oss_since:None ~internal_only:true ~in_product_since:rel_rio + "device_name" "actual dom0 device name" + ; field ~qualifier:DynamicRO ~ty:(Ref _pif_metrics) + ~in_product_since:rel_rio "metrics" "metrics associated with this PIF" ; field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "physical" @@ -1948,22 +1968,24 @@ module PIF_metrics = struct [ uid _pif_metrics ; namespace ~name:"io" ~contents:iobandwidth () - ; field ~qualifier:DynamicRO ~ty:Bool "carrier" - "Report if the PIF got a carrier or not" - ; field ~qualifier:DynamicRO ~ty:String "vendor_id" "Report vendor ID" - ; field ~qualifier:DynamicRO ~ty:String "vendor_name" - "Report vendor name" - ; field ~qualifier:DynamicRO ~ty:String "device_id" "Report device ID" - ; field ~qualifier:DynamicRO ~ty:String "device_name" - "Report device name" - ; field ~qualifier:DynamicRO ~ty:Int "speed" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + "carrier" "Report if the PIF got a carrier or not" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "vendor_id" "Report vendor ID" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "vendor_name" "Report vendor name" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "device_id" "Report device ID" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "device_name" "Report device name" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "speed" "Speed of the link in Mbit/s (if available)" - ; field ~qualifier:DynamicRO ~ty:Bool "duplex" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio "duplex" "Full duplex capability of the link (if available)" - ; field ~qualifier:DynamicRO ~ty:String "pci_bus_path" - "PCI bus path of the pif (if available)" - ; field ~qualifier:DynamicRO ~ty:DateTime "last_updated" - "Time at which this information was last updated" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "pci_bus_path" "PCI bus path of the pif (if available)" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_updated" "Time at which this information was last updated" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) 
"other_config" "additional configuration" @@ -2321,17 +2343,17 @@ module PBD = struct ~contents: [ uid _pbd - ; field ~qualifier:StaticRO ~ty:(Ref _host) "host" - "physical machine on which the pbd is available" - ; field ~qualifier:StaticRO ~ty:(Ref _sr) "SR" + ; field ~qualifier:StaticRO ~ty:(Ref _host) ~in_product_since:rel_rio + "host" "physical machine on which the pbd is available" + ; field ~qualifier:StaticRO ~ty:(Ref _sr) ~in_product_since:rel_rio "SR" "the storage repository that the pbd realises" ; field ~ty:(Map (String, String)) - ~qualifier:StaticRO "device_config" + ~qualifier:StaticRO "device_config" ~in_product_since:rel_rio "a config string to string map that is provided to the host's \ SR-backend-driver" - ; field ~ty:Bool ~qualifier:DynamicRO "currently_attached" - "is the SR currently attached on this host?" + ; field ~ty:Bool ~qualifier:DynamicRO ~in_product_since:rel_rio + "currently_attached" "is the SR currently attached on this host?" ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -2353,15 +2375,17 @@ let device_status_fields = ) ] "currently_attached" "is the device currently attached (erased on reboot)" - ; field ~ty:Int ~qualifier:DynamicRO "status_code" + ; field ~ty:Int ~qualifier:DynamicRO ~in_product_since:rel_rio "status_code" "error/success code associated with last attach-operation (erased on \ reboot)" - ; field ~ty:String ~qualifier:DynamicRO "status_detail" + ; field ~ty:String ~qualifier:DynamicRO ~in_product_since:rel_rio + "status_detail" "error/success information associated with last attach-operation status \ (erased on reboot)" ; field ~ty:(Map (String, String)) - ~qualifier:DynamicRO "runtime_properties" "Device runtime properties" + ~qualifier:DynamicRO ~in_product_since:rel_rio "runtime_properties" + "Device runtime properties" ] module VIF = struct @@ -2652,21 +2676,24 @@ module VIF = struct ([uid _vif] @ allowed_and_current_operations operations @ [ - field ~qualifier:StaticRO "device" + field ~qualifier:StaticRO ~in_product_since:rel_rio "device" "order in which VIF backends are created by xapi" - ; field ~qualifier:StaticRO ~ty:(Ref _network) "network" + ; field ~qualifier:StaticRO ~ty:(Ref _network) + ~in_product_since:rel_rio "network" "virtual network to which this vif is connected" - ; field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" - "virtual machine to which this vif is connected" - ; field ~qualifier:StaticRO ~ty:String "MAC" + ; field ~qualifier:StaticRO ~ty:(Ref _vm) ~in_product_since:rel_rio + "VM" "virtual machine to which this vif is connected" + ; field ~qualifier:StaticRO ~ty:String ~in_product_since:rel_rio "MAC" "ethernet MAC address of virtual interface, as exposed to guest" - ; field ~qualifier:StaticRO ~ty:Int "MTU" "MTU in octets" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:Bool "reserved" + ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio "MTU" + "MTU in octets" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "reserved" "true if the VIF is reserved pending a reboot/migrate" ; field ~ty:(Map (String, String)) - "other_config" "additional configuration" + ~in_product_since:rel_rio "other_config" + "additional configuration" ] @ device_status_fields @ [namespace ~name:"qos" ~contents:(qos "VIF") ()] @@ -2748,8 +2775,8 @@ module VIF_metrics = struct [ uid _vif_metrics ; namespace ~name:"io" ~contents:iobandwidth () - ; field 
~qualifier:DynamicRO ~ty:DateTime "last_updated" - "Time at which this information was last updated" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_updated" "Time at which this information was last updated" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -2767,17 +2794,19 @@ module Data_source = struct ~contents: [ namespace ~name:"name" ~contents:(names oss_since_303 DynamicRO) () - ; field ~qualifier:DynamicRO ~ty:Bool "enabled" - "true if the data source is being logged" - ; field ~qualifier:DynamicRO ~ty:Bool "standard" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + "enabled" "true if the data source is being logged" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + "standard" "true if the data source is enabled by default. Non-default data \ sources cannot be disabled" - ; field ~qualifier:DynamicRO ~ty:String "units" "the units of the value" - ; field ~qualifier:DynamicRO ~ty:Float "min" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "units" "the units of the value" + ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio "min" "the minimum value of the data source" - ; field ~qualifier:DynamicRO ~ty:Float "max" + ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio "max" "the maximum value of the data source" - ; field ~qualifier:DynamicRO ~ty:Float "value" + ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio "value" "current value of the data source" ] () @@ -3371,23 +3400,29 @@ module SR = struct ] @ allowed_and_current_operations operations @ [ - field ~ty:(Set (Ref _vdi)) ~qualifier:DynamicRO "VDIs" + field ~ty:(Set (Ref _vdi)) ~qualifier:DynamicRO + ~in_product_since:rel_rio "VDIs" "all virtual disks known to this storage repository" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) "PBDs" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) + ~in_product_since:rel_rio "PBDs" "describes how particular hosts can see this storage repository" - ; field ~ty:Int ~qualifier:DynamicRO "virtual_allocation" + ; field ~ty:Int ~qualifier:DynamicRO ~in_product_since:rel_rio + "virtual_allocation" "sum of virtual_sizes of all VDIs in this storage repository (in \ bytes)" - ; field ~ty:Int ~qualifier:DynamicRO "physical_utilisation" + ; field ~ty:Int ~qualifier:DynamicRO ~in_product_since:rel_rio + "physical_utilisation" "physical space currently utilised on this storage repository \ (in bytes). Note that for sparse disk formats, \ physical_utilisation may be less than virtual_allocation" - ; field ~ty:Int ~qualifier:StaticRO "physical_size" - "total physical size of the repository (in bytes)" - ; field ~qualifier:StaticRO "type" "type of the storage repository" - ; field ~qualifier:StaticRO "content_type" + ; field ~ty:Int ~qualifier:StaticRO ~in_product_since:rel_rio + "physical_size" "total physical size of the repository (in bytes)" + ; field ~qualifier:StaticRO ~in_product_since:rel_rio "type" + "type of the storage repository" + ; field ~qualifier:StaticRO ~in_product_since:rel_rio "content_type" "the type of the SR's content, if required (e.g. 
ISOs)" ; field ~qualifier:DynamicRO "shared" ~ty:Bool + ~in_product_since:rel_rio "true if this SR is (capable of being) shared between multiple \ hosts" ; field @@ -3395,11 +3430,13 @@ module SR = struct "other_config" "additional configuration" ~map_keys_roles: [("folder", _R_VM_OP); ("XenCenter.CustomFields.*", _R_VM_OP)] + ~in_product_since:rel_rio ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" ; field ~ty:Bool ~qualifier:DynamicRO ~in_oss_since:None - ~internal_only:true "default_vdi_visibility" "" + ~in_product_since:rel_rio ~internal_only:true + "default_vdi_visibility" "" ; field ~in_oss_since:None ~ty:(Map (String, String)) ~in_product_since:rel_miami ~qualifier:RW "sm_config" @@ -3446,16 +3483,20 @@ module SM = struct [ uid _sm ; namespace ~name:"name" ~contents:(names None DynamicRO) () - ; field ~in_oss_since:None ~qualifier:DynamicRO "type" "SR.type" - ; field ~in_oss_since:None ~qualifier:DynamicRO "vendor" - "Vendor who created this plugin" - ; field ~in_oss_since:None ~qualifier:DynamicRO "copyright" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO "type" "SR.type" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO "vendor" "Vendor who created this plugin" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO "copyright" "Entity which owns the copyright of this plugin" - ; field ~in_oss_since:None ~qualifier:DynamicRO "version" - "Version of the plugin" - ; field ~in_oss_since:None ~qualifier:DynamicRO "required_api_version" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO "version" "Version of the plugin" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO "required_api_version" "Minimum SM API version required on the server" - ; field ~in_oss_since:None ~qualifier:DynamicRO + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:(Map (String, String)) "configuration" "names and descriptions of device config keys" ; field ~in_oss_since:None ~qualifier:DynamicRO @@ -4410,37 +4451,46 @@ module VDI = struct ] @ allowed_and_current_operations operations @ [ - field ~qualifier:StaticRO ~ty:(Ref _sr) "SR" - "storage repository in which the VDI resides" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" + field ~qualifier:StaticRO ~ty:(Ref _sr) ~in_product_since:rel_rio + "SR" "storage repository in which the VDI resides" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) + ~in_product_since:rel_rio "VBDs" "list of vbds that refer to this disk" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) + ~in_product_since:rel_rio "crash_dumps" "list of crash dumps that refer to this disk" - ; field ~qualifier:StaticRO ~ty:Int "virtual_size" + ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio + "virtual_size" "size of disk as presented to the guest (in bytes). 
Note that, \ depending on storage backend type, requested size may not be \ respected exactly" - ; field ~qualifier:DynamicRO ~ty:Int "physical_utilisation" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio + "physical_utilisation" "amount of physical space that the disk image is currently \ taking up on the storage repository (in bytes)" - ; field ~qualifier:StaticRO ~ty:type' "type" "type of the VDI" - ; field ~qualifier:StaticRO ~ty:Bool "sharable" - "true if this disk may be shared" - ; field ~qualifier:StaticRO ~ty:Bool "read_only" - "true if this disk may ONLY be mounted read-only" + ; field ~qualifier:StaticRO ~ty:type' ~in_product_since:rel_rio "type" + "type of the VDI" + ; field ~qualifier:StaticRO ~ty:Bool ~in_product_since:rel_rio + "sharable" "true if this disk may be shared" + ; field ~qualifier:StaticRO ~ty:Bool ~in_product_since:rel_rio + "read_only" "true if this disk may ONLY be mounted read-only" ; field ~ty:(Map (String, String)) - "other_config" "additional configuration" + ~in_product_since:rel_rio "other_config" + "additional configuration" ~map_keys_roles: [("folder", _R_VM_OP); ("XenCenter.CustomFields.*", _R_VM_OP)] ; field ~qualifier:DynamicRO ~ty:Bool "storage_lock" + ~in_product_since:rel_rio "true if this disk is locked at the storage level" ; (* XXX: location field was in the database in rio, now API in miami *) field ~in_oss_since:None ~in_product_since:rel_miami ~ty:String ~qualifier:DynamicRO ~default_value:(Some (VString "")) "location" "location information" - ; field ~in_oss_since:None ~ty:Bool ~qualifier:DynamicRO "managed" "" - ; field ~in_oss_since:None ~ty:Bool ~qualifier:DynamicRO "missing" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:Bool + ~qualifier:DynamicRO "managed" "" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:Bool + ~qualifier:DynamicRO "missing" "true if SR scan operation reported this VDI as not present on \ disk" ; field ~in_oss_since:None ~ty:(Ref _vdi) ~qualifier:DynamicRO @@ -4684,8 +4734,10 @@ module VBD = struct ([uid _vbd] @ allowed_and_current_operations operations @ [ - field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine" - ; field ~qualifier:StaticRO ~ty:(Ref _vdi) "VDI" "the virtual disk" + field ~qualifier:StaticRO ~ty:(Ref _vm) ~in_product_since:rel_rio + "VM" "the virtual machine" + ; field ~qualifier:StaticRO ~ty:(Ref _vdi) ~in_product_since:rel_rio + "VDI" "the virtual disk" ; field ~qualifier:StaticRO ~ty:String ~default_value:(Some (VString "")) ~lifecycle: @@ -4698,25 +4750,29 @@ module VBD = struct ) ] "device" "device seen by the guest e.g. hda1" - ; field "userdevice" "user-friendly device name e.g. 0,1,2,etc." - ; field ~ty:Bool "bootable" "true if this VBD is bootable" - ; field ~qualifier:StaticRO ~ty:mode "mode" + ; field ~in_product_since:rel_rio "userdevice" + "user-friendly device name e.g. 0,1,2,etc." + ; field ~ty:Bool ~in_product_since:rel_rio "bootable" + "true if this VBD is bootable" + ; field ~qualifier:StaticRO ~ty:mode ~in_product_since:rel_rio "mode" "the mode the VBD should be mounted with" - ; field ~ty:type' "type" + ; field ~ty:type' ~in_product_since:rel_rio "type" "how the VBD will appear to the guest (e.g. 
disk or CD)" ; field ~in_oss_since:None ~in_product_since:rel_miami ~ty:Bool ~default_value:(Some (VBool true)) "unpluggable" "true if this VBD will support hot-unplug" - ; field ~qualifier:DynamicRO ~ty:Bool "storage_lock" - "true if a storage level lock was acquired" - ; field ~qualifier:StaticRO ~ty:Bool "empty" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + "storage_lock" "true if a storage level lock was acquired" + ; field ~qualifier:StaticRO ~ty:Bool ~in_product_since:rel_rio "empty" "if true this represents an empty drive" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:Bool ~default_value:(Some (VBool false)) "reserved" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_only:true ~qualifier:DynamicRO ~ty:Bool + ~default_value:(Some (VBool false)) "reserved" "true if the VBD is reserved pending a reboot/migrate" ; field ~ty:(Map (String, String)) - "other_config" "additional configuration" + ~in_product_since:rel_rio "other_config" + "additional configuration" ] @ device_status_fields @ [namespace ~name:"qos" ~contents:(qos "VBD") ()] @@ -4765,7 +4821,7 @@ module VBD_metrics = struct ; (Removed, rel_tampa, "Disabled in favour of RRD") ] "last_updated" "Time at which this information was last updated" - ; field ~in_product_since:rel_orlando + ; field ~lifecycle: [ (Published, rel_orlando, "") @@ -4796,8 +4852,10 @@ module Crashdump = struct ~contents: [ uid _crashdump - ; field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine" - ; field ~qualifier:StaticRO ~ty:(Ref _vdi) "VDI" "the virtual disk" + ; field ~qualifier:StaticRO ~ty:(Ref _vm) ~in_product_since:rel_rio "VM" + "the virtual machine" + ; field ~qualifier:StaticRO ~ty:(Ref _vdi) ~in_product_since:rel_rio + "VDI" "the virtual disk" ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -5061,16 +5119,17 @@ module Console = struct ~contents: [ uid _console - ; field ~qualifier:DynamicRO ~ty:protocol "protocol" - "the protocol used by this console" - ; field ~qualifier:DynamicRO ~ty:String "location" - "URI for the console service" - ; field ~qualifier:DynamicRO ~ty:(Ref _vm) "VM" - "VM to which this console is attached" + ; field ~qualifier:DynamicRO ~ty:protocol ~in_product_since:rel_rio + "protocol" "the protocol used by this console" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "location" "URI for the console service" + ; field ~qualifier:DynamicRO ~ty:(Ref _vm) ~in_product_since:rel_rio + "VM" "VM to which this console is attached" ; field ~ty:(Map (String, String)) - "other_config" "additional configuration" - ; field ~in_oss_since:None ~internal_only:true ~ty:Int "port" + ~in_product_since:rel_rio "other_config" "additional configuration" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~internal_only:true + ~ty:Int "port" "port in dom0 on which the console server is listening" ] () @@ -5079,14 +5138,14 @@ end module VM_metrics = struct let vm_memory_metrics = [ - field ~qualifier:DynamicRO ~ty:Int "actual" + field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "actual" "Guest's actual memory (bytes)" ~persist:false ] let vm_vcpu_metrics = [ - field ~qualifier:DynamicRO ~ty:Int "number" "Current number of VCPUs" - ~persist:true + field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "number" + "Current number of VCPUs" ~persist:true ; field ~qualifier:DynamicRO ~ty:(Map (Int, Float)) ~persist:false "utilisation" @@ 
-5100,13 +5159,15 @@ module VM_metrics = struct ] ; field ~qualifier:DynamicRO ~ty:(Map (Int, Int)) - "CPU" "VCPU to PCPU map" ~persist:false + ~in_product_since:rel_rio "CPU" "VCPU to PCPU map" ~persist:false ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - "params" "The live equivalent to VM.VCPUs_params" ~persist:false + ~in_product_since:rel_rio "params" + "The live equivalent to VM.VCPUs_params" ~persist:false ; field ~qualifier:DynamicRO ~ty:(Map (Int, Set String)) - "flags" "CPU flags (blocked,online,running)" ~persist:false + ~in_product_since:rel_rio "flags" "CPU flags (blocked,online,running)" + ~persist:false ] let t = @@ -5120,14 +5181,17 @@ module VM_metrics = struct uid _vm_metrics ; namespace ~name:"memory" ~contents:vm_memory_metrics () ; namespace ~name:"VCPUs" ~contents:vm_vcpu_metrics () - ; field ~qualifier:DynamicRO ~ty:(Set String) "state" - "The state of the guest, eg blocked, dying etc" ~persist:false - ; field ~qualifier:DynamicRO ~ty:DateTime "start_time" - "Time at which this VM was last booted" + ; field ~qualifier:DynamicRO ~ty:(Set String) ~in_product_since:rel_rio + "state" "The state of the guest, eg blocked, dying etc" + ~persist:false + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "start_time" "Time at which this VM was last booted" ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime - "install_time" "Time at which the VM was installed" - ; field ~qualifier:DynamicRO ~ty:DateTime "last_updated" - "Time at which this information was last updated" ~persist:false + ~in_product_since:rel_rio "install_time" + "Time at which the VM was installed" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_updated" "Time at which this information was last updated" + ~persist:false ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ~persist:false @@ -5182,14 +5246,15 @@ module VM_guest_metrics = struct uid _vm_guest_metrics ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - "os_version" "version of the OS" + ~in_product_since:rel_rio "os_version" "version of the OS" ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) ~lifecycle:[] "netbios_name" "The NETBIOS name of the machine" ~default_value:(Some (VMap [])) ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - "PV_drivers_version" "version of the PV drivers" + ~in_product_since:rel_rio "PV_drivers_version" + "version of the PV drivers" ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None ~lifecycle: [ @@ -5232,12 +5297,12 @@ module VM_guest_metrics = struct "disks" "This field exists but has no data." 
; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - "networks" "network configuration" + ~in_product_since:rel_rio "networks" "network configuration" ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - "other" "anything else" - ; field ~qualifier:DynamicRO ~ty:DateTime "last_updated" - "Time at which this information was last updated" + ~in_product_since:rel_rio "other" "anything else" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_updated" "Time at which this information was last updated" ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -5855,26 +5920,28 @@ module VMSS = struct [ uid _vmss ; namespace ~name:"name" ~contents:(names None RW) () - ; field ~qualifier:RW ~ty:Bool "enabled" + ; field ~qualifier:RW ~ty:Bool ~in_product_since:rel_rio "enabled" "enable or disable this snapshot schedule" ~default_value:(Some (VBool true)) - ; field ~qualifier:StaticRO ~ty:type' "type" + ; field ~qualifier:StaticRO ~ty:type' ~in_product_since:rel_rio "type" "type of the snapshot schedule" - ; field ~qualifier:StaticRO ~ty:Int "retained_snapshots" + ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio + "retained_snapshots" "maximum number of snapshots that should be stored at any time" ~default_value:(Some (VInt 7L)) - ; field ~qualifier:StaticRO ~ty:frequency "frequency" - "frequency of taking snapshot from snapshot schedule" + ; field ~qualifier:StaticRO ~ty:frequency ~in_product_since:rel_rio + "frequency" "frequency of taking snapshot from snapshot schedule" ; field ~qualifier:StaticRO ~ty:(Map (String, String)) - "schedule" + ~in_product_since:rel_rio "schedule" "schedule of the snapshot containing 'hour', 'min', 'days'. 
\ Date/time-related information is in Local Timezone" ~default_value:(Some (VMap [])) - ; field ~qualifier:DynamicRO ~ty:DateTime "last_run_time" - "time of the last snapshot" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "last_run_time" "time of the last snapshot" ~default_value:(Some (VDateTime Date.epoch)) - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) + ~in_product_since:rel_rio "VMs" "all VMs attached to this snapshot schedule" ] () @@ -6003,8 +6070,8 @@ module VM_appliance = struct ] @ allowed_and_current_operations operations @ [ - field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" - "all VMs in this appliance" + field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) + ~in_product_since:rel_rio "VMs" "all VMs in this appliance" ] ) () @@ -6045,7 +6112,8 @@ module DR_task = struct ~contents: [ uid _dr_task - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _sr)) "introduced_SRs" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _sr)) + ~in_product_since:rel_rio "introduced_SRs" "All SRs introduced by this appliance" ] () @@ -6187,20 +6255,24 @@ module Event = struct } ; contents= [ - field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:Int "id" + field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:Int + ~in_product_since:rel_rio "id" "An ID, monotonically increasing, and local to the current session" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:DateTime - ~internal_deprecated_since:rel_boston "timestamp" - "The time at which the event occurred" - ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String "class" + ~in_product_since:rel_rio ~internal_deprecated_since:rel_boston + "timestamp" "The time at which the event occurred" + ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String + ~in_product_since:rel_rio "class" "The name of the class of the object that changed" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:operation - "operation" "The operation that was performed" - ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String "ref" + ~in_product_since:rel_rio "operation" + "The operation that was performed" + ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String + ~in_product_since:rel_rio "ref" "A reference to the object that changed" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String - ~internal_deprecated_since:rel_boston "obj_uuid" - "The uuid of the object that changed" + ~in_product_since:rel_rio ~internal_deprecated_since:rel_boston + "obj_uuid" "The uuid of the object that changed" ] ; (* As of tampa, the event record has one more field, snapshot, which is the record of the object changed. 
Due to the difficulty of representing this in the datamodel, the doc is generated manually, @@ -6257,14 +6329,15 @@ module Blob = struct [ uid _blob ; namespace ~name:"name" ~contents:(names oss_since_303 RW) () - ; field ~qualifier:DynamicRO ~ty:Int "size" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "size" "Size of the binary data, in bytes" ; field ~writer_roles:_R_POOL_OP ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VBool false)) ~ty:Bool "public" "True if the blob is publicly accessible" - ; field ~qualifier:StaticRO ~ty:DateTime "last_updated" - "Time at which the data in the blob was last updated" - ; field ~qualifier:StaticRO ~ty:String "mime_type" + ; field ~qualifier:StaticRO ~ty:DateTime ~in_product_since:rel_rio + "last_updated" "Time at which the data in the blob was last updated" + ; field ~qualifier:StaticRO ~ty:String ~in_product_since:rel_rio + "mime_type" "The mime type associated with this object. Defaults to \ 'application/octet-stream' if the empty string is supplied" ] @@ -6399,9 +6472,10 @@ module Message = struct ~contents: [ uid _message - ; field ~qualifier:DynamicRO ~ty:String "name" "The name of the message" - ; field ~qualifier:DynamicRO ~ty:Int "priority" - "The message priority, 0 being low priority" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio "name" + "The name of the message" + ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio + "priority" "The message priority, 0 being low priority" ; field ~qualifier:DynamicRO ~ty:cls ~lifecycle: [ @@ -6409,11 +6483,12 @@ module Message = struct ; (Extended, "1.313.0", "Added Certificate class") ] "cls" "The class of the object this message is associated with" - ; field ~qualifier:DynamicRO ~ty:String "obj_uuid" - "The uuid of the object this message is associated with" - ; field ~qualifier:DynamicRO ~ty:DateTime "timestamp" - "The time at which the message was created" - ; field ~qualifier:DynamicRO ~ty:String "body" "The body of the message" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "obj_uuid" "The uuid of the object this message is associated with" + ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + "timestamp" "The time at which the message was created" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio "body" + "The body of the message" ] () end @@ -6458,11 +6533,12 @@ module Secret = struct ~contents: [ uid ~reader_roles:_R_POOL_OP _secret - ; field ~reader_roles:_R_POOL_OP ~qualifier:RW ~ty:String "value" - "the secret" + ; field ~reader_roles:_R_POOL_OP ~qualifier:RW ~ty:String + ~in_product_since:rel_rio "value" "the secret" ; field ~qualifier:RW ~ty:(Map (String, String)) - "other_config" "other_config" ~default_value:(Some (VMap [])) + ~in_product_since:rel_rio "other_config" "other_config" + ~default_value:(Some (VMap [])) ] () end @@ -7816,8 +7892,8 @@ module VUSB = struct ~ty:(Map (String, String)) ~lifecycle "other_config" "Additional configuration" ~default_value:(Some (VMap [])) - ; field ~qualifier:DynamicRO ~ty:Bool "currently_attached" - "is the device currently attached" + ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + "currently_attached" "is the device currently attached" ~default_value:(Some (VBool false)) ] ) diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index 45b9d3068e6..8b34e41c8ec 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -678,11 +678,19 @@ let field ?(in_oss_since = 
Some "3.0.3") ?in_product_since lifecycle ?(doc_tags = []) name desc = (* in_product_since currently defaults to 'Some rel_rio', for backwards compatibility. * This should eventually become 'None'. *) - let in_product_since = - match in_product_since with None -> Some rel_rio | x -> x + let _ = + match (lifecycle, in_product_since) with + | None, None -> + failwith ("Lifecycle for field '" ^ name ^ "' not specified") + | Some _, Some _ -> + failwith + ("lifecycle is given, in_product_since should not be specified \ + explicitly in " + ^ name + ) + | _, _ -> + () in - if lifecycle = None && in_product_since = None then - failwith ("Lifecycle for field '" ^ name ^ "' not specified") ; let lifecycle = match lifecycle with | None -> @@ -739,7 +747,9 @@ let field ?(in_oss_since = Some "3.0.3") ?in_product_since let uid ?(in_oss_since = Some "3.0.3") ?(reader_roles = None) ?lifecycle _refname = - field ~in_oss_since ?lifecycle ~qualifier:DynamicRO ~ty:String + let in_product_since = if lifecycle = None then Some rel_rio else None in + field ~in_oss_since ?in_product_since ?lifecycle ~qualifier:DynamicRO + ~ty:String ~writer_roles:_R_POOL_ADMIN (* only the system should be able to create/modify uuids *) ~reader_roles "uuid" "Unique identifier/object reference" @@ -748,13 +758,13 @@ let allowed_and_current_operations ?(writer_roles = None) ?(reader_roles = None) operations_type = [ field ~writer_roles ~reader_roles ~persist:false ~in_oss_since:None - ~qualifier:DynamicRO ~ty:(Set operations_type) + ~in_product_since:rel_rio ~qualifier:DynamicRO ~ty:(Set operations_type) ~default_value:(Some (VSet [])) "allowed_operations" "list of the operations allowed in this state. This list is advisory \ only and the server state may have changed by the time this field is \ read by a client." ; field ~writer_roles ~reader_roles ~persist:false ~in_oss_since:None - ~qualifier:DynamicRO + ~in_product_since:rel_rio ~qualifier:DynamicRO ~ty:(Map (String, operations_type)) ~default_value:(Some (VMap [])) "current_operations" "links each of the running tasks using this object (by reference) to a \ @@ -782,9 +792,10 @@ let namespace ?(get_field_writer_roles = fun x -> x) (** Many of the objects have a set of names of various lengths: *) let names ?(writer_roles = None) ?(reader_roles = None) ?lifecycle in_oss_since qual = + let in_product_since = if lifecycle = None then Some rel_rio else None in let field x y = - field x y ~in_oss_since ~qualifier:qual ~writer_roles ~reader_roles - ~default_value:(Some (VString "")) ?lifecycle + field x y ~in_oss_since ?in_product_since ~qualifier:qual ~writer_roles + ~reader_roles ~default_value:(Some (VString "")) ?lifecycle in [ field "label" "a human-readable name" diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index d48470f3a71..5bb9887a046 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -7,7 +7,7 @@ open Datamodel_types let host_memory = let field = field ~ty:Int in [ - field ~qualifier:DynamicRO "overhead" + field ~qualifier:DynamicRO "overhead" ~in_product_since:rel_rio "Virtualization memory overhead (bytes)." 
~default_value:(Some (VInt 0L)) ~doc_tags:[Memory] ] @@ -15,12 +15,14 @@ let host_memory = let api_version = let field' = field ~qualifier:DynamicRO in [ - field' ~ty:Int "major" "major version number" - ; field' ~ty:Int "minor" "minor version number" - ; field' ~ty:String "vendor" "identification of vendor" + field' ~ty:Int ~in_product_since:rel_rio "major" "major version number" + ; field' ~ty:Int ~in_product_since:rel_rio "minor" "minor version number" + ; field' ~ty:String ~in_product_since:rel_rio "vendor" + "identification of vendor" ; field' ~ty:(Map (String, String)) - "vendor_implementation" "details of vendor implementation" + ~in_product_since:rel_rio "vendor_implementation" + "details of vendor implementation" ] let migrate_receive = @@ -1985,68 +1987,76 @@ let t = @ [ namespace ~name:"API_version" ~contents:api_version () ; field ~qualifier:DynamicRO ~ty:Bool "enabled" - "True if the host is currently enabled" + ~in_product_since:rel_rio "True if the host is currently enabled" ; field ~qualifier:StaticRO ~ty:(Map (String, String)) - "software_version" "version strings" + ~in_product_since:rel_rio "software_version" "version strings" ; field ~ty:(Map (String, String)) - "other_config" "additional configuration" + ~in_product_since:rel_rio "other_config" "additional configuration" ~map_keys_roles: [("folder", _R_VM_OP); ("XenCenter.CustomFields.*", _R_VM_OP)] - ; field ~qualifier:StaticRO ~ty:(Set String) "capabilities" - "Xen capabilities" + ; field ~qualifier:StaticRO ~ty:(Set String) ~in_product_since:rel_rio + "capabilities" "Xen capabilities" ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - "cpu_configuration" + ~in_product_since:rel_rio "cpu_configuration" "The CPU configuration on this host. May contain keys such as \ \"nr_nodes\", \"sockets_per_node\", \"cores_per_socket\", or \ \"threads_per_core\"" - ; field ~qualifier:DynamicRO ~ty:String "sched_policy" - "Scheduler policy currently in force on this host" - ; field ~qualifier:DynamicRO ~ty:(Set String) "supported_bootloaders" + ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + "sched_policy" "Scheduler policy currently in force on this host" + ; field ~qualifier:DynamicRO ~ty:(Set String) ~in_product_since:rel_rio + "supported_bootloaders" "a list of the bootloaders installed on the machine" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "resident_VMs" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) + ~in_product_since:rel_rio "resident_VMs" "list of VMs currently resident on host" ; field ~qualifier:RW ~ty:(Map (String, String)) - "logging" "logging configuration" + ~in_product_since:rel_rio "logging" "logging configuration" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) ~doc_tags:[Networking] - "PIFs" "physical network interfaces" - ; field ~qualifier:RW ~ty:(Ref _sr) "suspend_image_sr" + ~in_product_since:rel_rio "PIFs" "physical network interfaces" + ; field ~qualifier:RW ~ty:(Ref _sr) ~in_product_since:rel_rio + "suspend_image_sr" "The SR in which VDIs for suspend images are created" - ; field ~qualifier:RW ~ty:(Ref _sr) "crash_dump_sr" - "The SR in which VDIs for crash dumps are created" - ; field ~in_oss_since:None ~qualifier:DynamicRO - ~ty:(Set (Ref _host_crashdump)) "crashdumps" + ; field ~qualifier:RW ~ty:(Ref _sr) ~in_product_since:rel_rio + "crash_dump_sr" "The SR in which VDIs for crash dumps are created" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:(Set (Ref _host_crashdump)) "crashdumps" "Set of host crash dumps" - ; field 
~in_oss_since:None ~internal_deprecated_since:rel_ely - ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "patches" - "Set of host patches" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~internal_deprecated_since:rel_ely ~qualifier:DynamicRO + ~ty:(Set (Ref _host_patch)) "patches" "Set of host patches" ; field ~in_oss_since:None ~in_product_since:rel_ely ~qualifier:DynamicRO ~ty:(Set (Ref _pool_update)) "updates" "Set of updates" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) "PBDs" - "physical blockdevices" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _hostcpu)) "host_CPUs" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) + ~in_product_since:rel_rio "PBDs" "physical blockdevices" + ; field ~qualifier:DynamicRO ~ty:(Set (Ref _hostcpu)) + ~in_product_since:rel_rio "host_CPUs" "The physical CPUs on this host" ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "cpu_info" "Details about the physical CPUs on this host" - ; field ~in_oss_since:None ~qualifier:RW ~ty:String - ~doc_tags:[Networking] "hostname" "The hostname of this host" - ; field ~in_oss_since:None ~qualifier:RW ~ty:String - ~doc_tags:[Networking] "address" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:String ~doc_tags:[Networking] "hostname" + "The hostname of this host" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:String ~doc_tags:[Networking] "address" "The address by which this host can be contacted from any other \ host in the pool" - ; field ~qualifier:DynamicRO ~ty:(Ref _host_metrics) "metrics" + ; field ~qualifier:DynamicRO ~ty:(Ref _host_metrics) + ~in_product_since:rel_rio "metrics" "metrics associated with this host" - ; field ~in_oss_since:None ~qualifier:DynamicRO + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:(Map (String, String)) "license_params" "State of the current license" - ; field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:Int "boot_free_mem" "Free memory on host at boot time" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~internal_only:true + ~qualifier:DynamicRO ~ty:Int "boot_free_mem" + "Free memory on host at boot time" ; field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml index 198c8b5a83a..f55f98d47ac 100644 --- a/ocaml/idl/datamodel_pool.ml +++ b/ocaml/idl/datamodel_pool.ml @@ -1287,20 +1287,22 @@ let t = ~contents: ([uid ~in_oss_since:None _pool] @ [ - field ~in_oss_since:None ~qualifier:RW ~ty:String "name_label" - "Short name" - ; field ~in_oss_since:None ~qualifier:RW ~ty:String "name_description" - "Description" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Ref _host) "master" + field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:String "name_label" "Short name" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:String "name_description" "Description" + ; field ~in_oss_since:None ~in_product_since:rel_rio + ~qualifier:DynamicRO ~ty:(Ref _host) "master" "The host that is pool master" - ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "default_SR" - "Default SR for VDIs" - ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) - "suspend_image_SR" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:(Ref _sr) "default_SR" "Default SR for VDIs" + ; 
field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:(Ref _sr) "suspend_image_SR" "The SR in which VDIs for suspend images are created" - ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "crash_dump_SR" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW + ~ty:(Ref _sr) "crash_dump_SR" "The SR in which VDIs for crash dumps are created" - ; field ~in_oss_since:None + ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:(Map (String, String)) "other_config" "additional configuration" ~map_keys_roles: @@ -1372,8 +1374,8 @@ let t = ~in_product_since:rel_george ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "wlb_enabled" "true if workload balancing is enabled on the pool, false otherwise" - ; field ~in_product_since:rel_george ~qualifier:RW ~ty:Bool - ~default_value:(Some (VBool false)) "wlb_verify_cert" + ; field ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) + "wlb_verify_cert" "true if communication with the WLB server should enforce TLS \ certificate verification." ~lifecycle: @@ -1516,7 +1518,7 @@ let t = "Default behaviour during migration, True if stream compression \ should be used" ; field ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool true)) - "coordinator_bias" + ~in_product_since:rel_rio "coordinator_bias" "true if bias against pool master when scheduling vms is enabled, \ false otherwise" ; field ~qualifier:StaticRO ~ty:Int ~default_value:(Some (VInt 8L)) diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index af7aa27b270..377d4c0f667 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -21,12 +21,14 @@ let vmpp_deprecated = let pv = [ - field "bootloader" "name of or path to bootloader" - ; field "kernel" "path to the kernel" - ; field "ramdisk" "path to the initrd" - ; field "args" "kernel command-line arguments" - ; field "bootloader_args" "miscellaneous arguments for the bootloader" - ; field ~in_oss_since:None "legacy_args" "to make Zurich guests boot" + field ~in_product_since:rel_rio "bootloader" "name of or path to bootloader" + ; field ~in_product_since:rel_rio "kernel" "path to the kernel" + ; field ~in_product_since:rel_rio "ramdisk" "path to the initrd" + ; field ~in_product_since:rel_rio "args" "kernel command-line arguments" + ; field ~in_product_since:rel_rio "bootloader_args" + "miscellaneous arguments for the bootloader" + ; field ~in_oss_since:None ~in_product_since:rel_rio "legacy_args" + "to make Zurich guests boot" ] (** HVM domain booting *) @@ -39,7 +41,9 @@ let hvm = ; (Deprecated, rel_kolkata, "Replaced by VM.domain_type") ] "boot_policy" "HVM boot policy" - ; field ~ty:(Map (String, String)) "boot_params" "HVM boot params" + ; field ~in_product_since:rel_rio + ~ty:(Map (String, String)) + "boot_params" "HVM boot params" ; field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None ~ty:Float ~in_product_since:rel_miami ~qualifier:StaticRO "shadow_multiplier" "multiplier applied to the amount of shadow that will be made available \ @@ -50,24 +54,29 @@ let hvm = let guest_memory = let field = field ~ty:Int in [ - field "overhead" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - "Virtualization memory overhead (bytes)." ~default_value:(Some (VInt 0L)) - ~doc_tags:[Memory] - ; field "target" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO + field "overhead" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN + ~qualifier:DynamicRO "Virtualization memory overhead (bytes)." 
+ ~default_value:(Some (VInt 0L)) ~doc_tags:[Memory] + ; field "target" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN + ~qualifier:StaticRO "Dynamically-set memory target (bytes). The value of this field \ indicates the current target for memory available to this VM." ~default_value:(Some (VInt 0L)) ~internal_deprecated_since:rel_midnight_ride ~doc_tags:[Memory] - ; field "static_max" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO + ; field "static_max" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN + ~qualifier:StaticRO "Statically-set (i.e. absolute) maximum (bytes). The value of this field \ at VM start time acts as a hard limit of the amount of memory a guest \ can use. New values only take effect on reboot." ~doc_tags:[Memory] - ; field "dynamic_max" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO + ; field "dynamic_max" ~in_product_since:rel_rio + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic maximum (bytes)" ~doc_tags:[Memory] - ; field "dynamic_min" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO + ; field "dynamic_min" ~in_product_since:rel_rio + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic minimum (bytes)" ~doc_tags:[Memory] - ; field "static_min" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO + ; field "static_min" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN + ~qualifier:StaticRO "Statically-set (i.e. absolute) mininum (bytes). The value of this field \ indicates the least amount of memory this VM can boot with without \ crashing." @@ -118,17 +127,21 @@ let on_normal_exit_behaviour = (** Virtual CPUs *) let vcpus = [ - field + field ~in_product_since:rel_rio ~ty:(Map (String, String)) "params" "configuration parameters for the selected VCPU policy" - ; field ~qualifier:StaticRO ~ty:Int "max" "Max number of VCPUs" - ; field ~qualifier:StaticRO ~ty:Int "at_startup" "Boot number of VCPUs" + ; field ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:Int "max" + "Max number of VCPUs" + ; field ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:Int "at_startup" + "Boot number of VCPUs" ] (** Default actions *) let actions = - let crash = field ~qualifier:StaticRO ~ty:on_crash_behaviour in - let normal = field ~ty:on_normal_exit_behaviour in + let crash = + field ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:on_crash_behaviour + in + let normal = field ~in_product_since:rel_rio ~ty:on_normal_exit_behaviour in let soft = field ~qualifier:RW ~lifecycle:[] ~ty:on_softreboot_behavior ~default_value:(Some (VEnum "soft_reboot")) @@ -1970,9 +1983,9 @@ let t = ) ] ~ty:power_state "power_state" "Current power state of the machine" - ; field ~ty:Int "user_version" + ; field ~ty:Int "user_version" ~in_product_since:rel_rio "Creators of VMs and templates may store version information here." - ; field ~effect:true ~ty:Bool "is_a_template" + ; field ~effect:true ~ty:Bool "is_a_template" ~in_product_since:rel_rio "true if this is a template. Template VMs can never be started, \ they are used only for cloning other VMs" ; field ~ty:Bool ~default_value:(Some (VBool false)) @@ -1994,40 +2007,44 @@ let t = ~ty:(Ref _vdi) "suspend_VDI" "The VDI that a suspend image is stored on. 
(Only has meaning if \ VM is currently suspended)" - ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~ty:(Ref _host) "resident_on" + ; field ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN + ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" "the host the VM is currently resident on" ; field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None - ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) - ~ty:(Ref _host) "scheduled_to_be_resident_on" + ~in_product_since:rel_rio ~qualifier:DynamicRO + ~default_value:(Some (VRef null_ref)) ~ty:(Ref _host) + "scheduled_to_be_resident_on" "the host on which the VM is due to be started/resumed/migrated. \ This acts as a memory reservation indicator" ; field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None - ~ty:(Ref _host) "affinity" + ~in_product_since:rel_rio ~ty:(Ref _host) "affinity" "A host which the VM has some affinity for (or NULL). This is used \ as a hint to the start call when it decides where to run the VM. \ Resource constraints may cause the VM to be started elsewhere." ; namespace ~name:"memory" ~contents:guest_memory () ; namespace ~name:"VCPUs" ~contents:vcpus () ; namespace ~name:"actions" ~contents:actions () - ; field ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO - ~ty:(Set (Ref _console)) "consoles" "virtual console devices" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) ~doc_tags:[Networking] - "VIFs" "virtual network interfaces" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" - "virtual block devices" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vusb)) "VUSBs" - "virtual usb devices" - ; field ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO - ~ty:(Set (Ref _crashdump)) "crash_dumps" + ; field ~in_product_since:rel_rio ~writer_roles:_R_POOL_ADMIN + ~qualifier:DynamicRO ~ty:(Set (Ref _console)) "consoles" + "virtual console devices" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO + ~ty:(Set (Ref _vif)) ~doc_tags:[Networking] "VIFs" + "virtual network interfaces" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO + ~ty:(Set (Ref _vbd)) "VBDs" "virtual block devices" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO + ~ty:(Set (Ref _vusb)) "VUSBs" "virtual usb devices" + ; field ~in_product_since:rel_rio ~writer_roles:_R_POOL_ADMIN + ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" "crash dumps associated with this VM" - ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vtpm)) "VTPMs" - "virtual TPMs" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO + ~ty:(Set (Ref _vtpm)) "VTPMs" "virtual TPMs" ; namespace ~name:"PV" ~contents:pv () ; namespace ~name:"HVM" ~contents:hvm () ; field ~ty:(Map (String, String)) - "platform" "platform-specific configuration" + ~in_product_since:rel_rio "platform" + "platform-specific configuration" ; field ~lifecycle: [ @@ -2035,7 +2052,7 @@ let t = ; (Deprecated, rel_boston, "Field was never used") ] "PCI_bus" "PCI bus path for pass-through devices" - ; field + ; field ~in_product_since:rel_rio ~ty:(Map (String, String)) "other_config" "additional configuration" ~map_keys_roles: @@ -2044,19 +2061,22 @@ let t = ; ("folder", _R_VM_OP) ; ("XenCenter.CustomFields.*", _R_VM_OP) ] - ; field ~qualifier:DynamicRO ~ty:Int "domid" + ; field ~qualifier:DynamicRO ~ty:Int "domid" ~in_product_since:rel_rio "domain ID (if available, -1 otherwise)" - ; field ~qualifier:DynamicRO ~in_oss_since:None ~ty:String "domarch" + ; field ~qualifier:DynamicRO ~in_oss_since:None + ~in_product_since:rel_rio ~ty:String "domarch" "Domain 
architecture (if available, null string otherwise)" - ; field ~in_oss_since:None ~qualifier:StaticRO + ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "last_boot_CPU_flags" "describes the CPU flags on which the VM was last booted" - ; field ~qualifier:DynamicRO ~ty:Bool "is_control_domain" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO ~ty:Bool + "is_control_domain" "true if this is a control domain (domain 0 or a driver domain)" - ; field ~qualifier:DynamicRO ~ty:(Ref _vm_metrics) "metrics" - "metrics associated with this VM" - ; field ~qualifier:DynamicRO ~ty:(Ref _vm_guest_metrics) "guest_metrics" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO + ~ty:(Ref _vm_metrics) "metrics" "metrics associated with this VM" + ; field ~in_product_since:rel_rio ~qualifier:DynamicRO + ~ty:(Ref _vm_guest_metrics) "guest_metrics" "metrics associated with the running guest" ; (* This was an internal field in Rio, Miami beta1, Miami beta2 but is now exposed so that it will be included automatically in Miami GA exports and can be restored, important if @@ -2078,7 +2098,8 @@ let t = ~qualifier:StaticRO ~ty:String "last_booted_record" "marshalled value containing VM record at time of last boot" ~default_value:(Some (VString "")) - ; field ~in_oss_since:None ~ty:String "recommendations" + ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:String + "recommendations" "An XML specification of recommended values and ranges for \ properties of this VM" ; field ~effect:true ~in_oss_since:None @@ -2171,8 +2192,9 @@ let t = ~ty:Bool "is_vmss_snapshot" "true if this snapshot was created by the snapshot schedule" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO - ~ty:(Ref _vm_appliance) ~default_value:(Some (VRef null_ref)) - "appliance" "the appliance to which this VM belongs" + ~in_product_since:rel_rio ~ty:(Ref _vm_appliance) + ~default_value:(Some (VRef null_ref)) "appliance" + "the appliance to which this VM belongs" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "start_delay" diff --git a/ocaml/idl/datamodel_vtpm.ml b/ocaml/idl/datamodel_vtpm.ml index b5278fe5d4e..3c31614a9a1 100644 --- a/ocaml/idl/datamodel_vtpm.ml +++ b/ocaml/idl/datamodel_vtpm.ml @@ -77,9 +77,10 @@ let t = ; allowed_and_current_operations operations ; [ field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" + ~in_product_since:rel_rio "The virtual machine the TPM is attached to" ; field ~qualifier:DynamicRO ~ty:(Ref _vm) "backend" - ~default_value:(Some (VRef null_ref)) + ~in_product_since:rel_rio ~default_value:(Some (VRef null_ref)) "The domain where the backend is located (unused)" ; field ~qualifier:DynamicRO ~ty:persistence_backend ~default_value:(Some (VEnum "xapi")) ~lifecycle:[] From 7f1fff6a23b2ba53f34c975ac3db3fa0be95e22d Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 23 Sep 2024 13:15:24 +0100 Subject: [PATCH 035/141] idl: Remove in_product_since and internal_deprecated_since parameters for call These were already being replaced with lifecycle equivalents in datamodel_common, search and replace them all and remove parameters as such. 
Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 1421 +++++++++++++++++++++++----- ocaml/idl/datamodel_common.ml | 32 +- ocaml/idl/datamodel_diagnostics.ml | 28 +- ocaml/idl/datamodel_host.ml | 684 +++++++++++-- ocaml/idl/datamodel_pool.ml | 441 +++++++-- ocaml/idl/datamodel_vm.ml | 731 ++++++++++++-- 6 files changed, 2821 insertions(+), 516 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index d2471e720f2..b72087fb432 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -30,7 +30,15 @@ let api_version_minor = Datamodel_common.api_version_minor module Session = struct let login = - call ~flags:[] ~name:"login_with_password" ~in_product_since:rel_rio + call ~flags:[] ~name:"login_with_password" + ~lifecycle: + [ + ( Published + , rel_rio + , "Attempt to authenticate the user, returning a session reference \ + if successful" + ) + ] ~doc: "Attempt to authenticate the user, returning a session reference if \ successful" @@ -84,12 +92,29 @@ module Session = struct (Ref _host, "host", "Host id of slave") ; (SecretString, "psecret", "Pool secret") ] - ~in_oss_since:None ~in_product_since:rel_rio ~secret:true - ~hide_from_docs:true ~allowed_roles:_R_POOL_ADMIN + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Attempt to authenticate to the pool master by presenting the \ + slave's host ref and pool secret" + ) + ] + ~secret:true ~hide_from_docs:true ~allowed_roles:_R_POOL_ADMIN (*system can create a slave session !!! *) () let slave_local_login = - call ~flags:[] ~in_product_since:rel_miami ~name:"slave_local_login" + call ~flags:[] + ~lifecycle: + [ + ( Published + , rel_miami + , "Authenticate locally against a slave in emergency mode. Note the \ + resulting sessions are only good for use on this host." + ) + ] + ~name:"slave_local_login" ~doc: "Authenticate locally against a slave in emergency mode. Note the \ resulting sessions are only good for use on this host." @@ -99,7 +124,15 @@ module Session = struct ~allowed_roles:_R_POOL_ADMIN (*system can create a slave session*) () let slave_local_login_with_password = - call ~flags:[] ~in_product_since:rel_miami + call ~flags:[] + ~lifecycle: + [ + ( Published + , rel_miami + , "Authenticate locally against a slave in emergency mode. Note the \ + resulting sessions are only good for use on this host." + ) + ] ~name:"slave_local_login_with_password" ~doc: "Authenticate locally against a slave in emergency mode. Note the \ @@ -123,14 +156,17 @@ module Session = struct ~in_oss_since:None ~allowed_roles:_R_LOCAL_ROOT_ONLY () let local_logout = - call ~flags:[`Session] ~in_product_since:rel_miami ~name:"local_logout" - ~doc:"Log out of local session." ~params:[] ~in_oss_since:None - ~allowed_roles:_R_POOL_ADMIN (*system can destroy a local session*) () + call ~flags:[`Session] + ~lifecycle:[(Published, rel_miami, "Log out of local session.")] + ~name:"local_logout" ~doc:"Log out of local session." 
~params:[] + ~in_oss_since:None ~allowed_roles:_R_POOL_ADMIN + (*system can destroy a local session*) () let logout = - call ~flags:[`Session] ~in_product_since:rel_rio ~name:"logout" - ~doc:"Log out of a session" ~params:[] ~allowed_roles:_R_ALL - (*any role can destroy a known user session*) () + call ~flags:[`Session] + ~lifecycle:[(Published, rel_rio, "Log out of a session")] + ~name:"logout" ~doc:"Log out of a session" ~params:[] + ~allowed_roles:_R_ALL (*any role can destroy a known user session*) () let change_password = call ~flags:[`Session] ~name:"change_password" @@ -143,8 +179,16 @@ module Session = struct (String, "old_pwd", "Old password for account") ; (String, "new_pwd", "New password for account") ] - ~in_product_since:rel_rio ~in_oss_since:None - ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~lifecycle: + [ + ( Published + , rel_rio + , "Change the account password; if your session is authenticated \ + with root privileges then the old_pwd is validated and the \ + new_pwd is set regardless" + ) + ] + ~in_oss_since:None ~allowed_roles:_R_LOCAL_ROOT_ONLY (*not even pool-admin can change passwords, only root*) () let get_all_subject_identifiers = @@ -156,8 +200,16 @@ module Session = struct ( Set String , "The list of user subject-identifiers of all existing sessions" ) - ~params:[] ~in_product_since:rel_george ~in_oss_since:None - ~allowed_roles:_R_ALL () + ~params:[] + ~lifecycle: + [ + ( Published + , rel_george + , "Return a list of all the user subject-identifiers of all existing \ + sessions" + ) + ] + ~in_oss_since:None ~allowed_roles:_R_ALL () let logout_subject_identifier = call ~name:"logout_subject_identifier" @@ -171,8 +223,16 @@ module Session = struct , "User subject-identifier of the sessions to be destroyed" ) ] - ~in_product_since:rel_george ~in_oss_since:None ~allowed_roles:_R_POOL_OP - () + ~lifecycle: + [ + ( Published + , rel_george + , "Log out all sessions associated to a user subject-identifier, \ + except the session associated with the context calling this \ + function" + ) + ] + ~in_oss_since:None ~allowed_roles:_R_POOL_OP () let t = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 @@ -266,7 +326,17 @@ module Task = struct ) let cancel = - call ~name:"cancel" ~in_product_since:rel_rio + call ~name:"cancel" + ~lifecycle: + [ + ( Published + , rel_rio + , "Request that a task be cancelled. Note that a task may fail to be \ + cancelled and may complete or fail normally and note that, even \ + when a task does cancel, it might take an arbitrary amount of \ + time." + ) + ] ~doc: "Request that a task be cancelled. Note that a task may fail to be \ cancelled and may complete or fail normally and note that, even when \ @@ -278,7 +348,14 @@ module Task = struct () let create = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:rel_rio + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Create a new task object which must be manually destroyed." + ) + ] ~name:"create" ~doc:"Create a new task object which must be manually destroyed." 
~params: @@ -290,7 +367,8 @@ module Task = struct ~allowed_roles:_R_READ_ONLY (* any subject can create tasks *) () let destroy = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:rel_rio + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Destroy the task object")] ~name:"destroy" ~doc:"Destroy the task object" ~params:[(Ref _task, "self", "Reference to the task object")] ~allowed_roles:_R_READ_ONLY @@ -298,7 +376,8 @@ module Task = struct () let set_status = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:rel_falcon + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle:[(Published, rel_falcon, "Set the task status")] ~name:"set_status" ~doc:"Set the task status" ~params: [ @@ -310,7 +389,8 @@ module Task = struct () let set_progress = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:rel_stockholm + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle:[(Published, rel_stockholm, "Set the task progress")] ~name:"set_progress" ~doc:"Set the task progress" ~params: [ @@ -322,7 +402,8 @@ module Task = struct () let set_result = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:"21.3.0" + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle:[(Published, "21.3.0", "")] ~name:"set_result" ~doc:"Set the task result" ~params: [ @@ -334,7 +415,8 @@ module Task = struct () let set_error_info = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:"21.3.0" + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle:[(Published, "21.3.0", "")] ~name:"set_error_info" ~doc:"Set the task error info" ~params: [ @@ -346,7 +428,8 @@ module Task = struct () let set_resident_on = - call ~flags:[`Session] ~in_oss_since:None ~in_product_since:"21.3.0" + call ~flags:[`Session] ~in_oss_since:None + ~lifecycle:[(Published, "21.3.0", "")] ~name:"set_resident_on" ~doc:"Set the resident on field" ~params: [ @@ -519,14 +602,28 @@ module Host_crashdump = struct let destroy = call ~name:"destroy" ~doc:"Destroy specified host crash dump, removing it from the disk." - ~in_oss_since:None ~in_product_since:rel_rio + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Destroy specified host crash dump, removing it from the disk." 
+ ) + ] ~params:[(Ref _host_crashdump, "self", "The host crashdump to destroy")] ~allowed_roles:_R_POOL_OP () let upload = call ~name:"upload" ~doc:"Upload the specified host crash dump to a specified URL" - ~in_oss_since:None ~in_product_since:rel_rio + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Upload the specified host crash dump to a specified URL" + ) + ] ~params: [ (Ref _host_crashdump, "self", "The host crashdump to upload") @@ -598,7 +695,7 @@ module Pool_update = struct let introduce = call ~name:"introduce" ~doc:"Introduce update VDI" ~in_oss_since:None - ~in_product_since:rel_ely + ~lifecycle:[(Published, rel_ely, "Introduce update VDI")] ~params:[(Ref _vdi, "vdi", "The VDI which contains a software update.")] ~result:(Ref _pool_update, "the introduced pool update") ~allowed_roles:_R_POOL_OP () @@ -606,7 +703,14 @@ module Pool_update = struct let precheck = call ~name:"precheck" ~doc:"Execute the precheck stage of the selected update on a host" - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_ely + , "Execute the precheck stage of the selected update on a host" + ) + ] ~params: [ (Ref _pool_update, "self", "The update whose prechecks will be run") @@ -618,7 +722,8 @@ module Pool_update = struct let apply = call ~name:"apply" ~doc:"Apply the selected update to a host" - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle:[(Published, rel_ely, "Apply the selected update to a host")] ~params: [ (Ref _pool_update, "self", "The update to apply") @@ -630,7 +735,14 @@ module Pool_update = struct let pool_apply = call ~name:"pool_apply" ~doc:"Apply the selected update to all hosts in the pool" - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_ely + , "Apply the selected update to all hosts in the pool" + ) + ] ~params:[(Ref _pool_update, "self", "The update to apply")] ~allowed_roles:_R_POOL_OP () @@ -639,20 +751,36 @@ module Pool_update = struct ~doc: "Removes the update's files from all hosts in the pool, but does not \ revert the update" - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_ely + , "Removes the update's files from all hosts in the pool, but does \ + not revert the update" + ) + ] ~params:[(Ref _pool_update, "self", "The update to clean up")] ~allowed_roles:_R_POOL_OP () let destroy = call ~name:"destroy" ~doc:"Removes the database entry. Only works on unapplied update." - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_ely + , "Removes the database entry. Only works on unapplied update." 
+ ) + ] ~params:[(Ref _pool_update, "self", "The update to destroy")] ~allowed_roles:_R_POOL_OP () let attach = call ~name:"attach" ~hide_from_docs:true ~doc:"Attach the pool update VDI" - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle:[(Published, rel_ely, "Attach the pool update VDI")] ~versioned_params: [ { @@ -675,14 +803,16 @@ module Pool_update = struct let detach = call ~name:"detach" ~hide_from_docs:true ~doc:"Detach the pool update VDI" - ~in_oss_since:None ~in_product_since:rel_ely + ~in_oss_since:None + ~lifecycle:[(Published, rel_ely, "Detach the pool update VDI")] ~params:[(Ref _pool_update, "self", "The update to be detached")] ~allowed_roles:_R_POOL_OP () let resync_host = call ~name:"resync_host" ~hide_from_docs:true ~doc:"Resync the applied updates of the host" ~in_oss_since:None - ~in_product_since:rel_ely + ~lifecycle: + [(Published, rel_ely, "Resync the applied updates of the host")] ~params:[(Ref _host, "host", "The host to resync the applied updates")] ~allowed_roles:_R_POOL_OP () @@ -764,72 +894,129 @@ module Pool_patch = struct let apply = call ~name:"apply" ~doc:"Apply the selected patch to a host and return its output" - ~in_oss_since:None ~in_product_since:rel_miami + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "Apply the selected patch to a host and return its output" + ) + ; (Deprecated, rel_ely, "") + ] ~params: [ (Ref _pool_patch, "self", "The patch to apply") ; (Ref _host, "host", "The host to apply the patch too") ] ~result:(String, "the output of the patch application process") - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let precheck = call ~name:"precheck" ~doc: "Execute the precheck stage of the selected patch on a host and return \ its output" - ~in_oss_since:None ~in_product_since:rel_miami + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "Execute the precheck stage of the selected patch on a host and \ + return its output" + ) + ; (Deprecated, rel_ely, "") + ] ~params: [ (Ref _pool_patch, "self", "The patch whose prechecks will be run") ; (Ref _host, "host", "The host to run the prechecks on") ] ~result:(String, "the output of the patch prechecks") - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let clean = call ~name:"clean" ~doc:"Removes the patch's files from the server" - ~in_oss_since:None ~in_product_since:rel_miami + ~in_oss_since:None + ~lifecycle: + [ + (Published, rel_miami, "Removes the patch's files from the server") + ; (Deprecated, rel_ely, "") + ] ~params:[(Ref _pool_patch, "self", "The patch to clean up")] - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let clean_on_host = call ~name:"clean_on_host" ~doc:"Removes the patch's files from the specified host" - ~in_oss_since:None ~in_product_since:rel_tampa + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_tampa + , "Removes the patch's files from the specified host" + ) + ; (Deprecated, rel_ely, "") + ] ~params: [ (Ref _pool_patch, "self", "The patch to clean up") ; (Ref _host, "host", "The host on which to clean the patch") ] - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let pool_clean = call ~name:"pool_clean" ~doc: "Removes the patch's files from all hosts in the pool, but does not \ remove the database entries" - ~in_oss_since:None ~in_product_since:rel_tampa + ~in_oss_since:None + ~lifecycle: + [ 
+ ( Published + , rel_tampa + , "Removes the patch's files from all hosts in the pool, but does \ + not remove the database entries" + ) + ; (Deprecated, rel_ely, "") + ] ~params:[(Ref _pool_patch, "self", "The patch to clean up")] - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let destroy = call ~name:"destroy" ~doc: "Removes the patch's files from all hosts in the pool, and removes the \ database entries. Only works on unapplied patches." - ~in_oss_since:None ~in_product_since:rel_miami + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "Removes the patch's files from all hosts in the pool, and removes \ + the database entries. Only works on unapplied patches." + ) + ; (Deprecated, rel_ely, "") + ] ~params:[(Ref _pool_patch, "self", "The patch to destroy")] - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let pool_apply = call ~name:"pool_apply" ~doc: "Apply the selected patch to all hosts in the pool and return a map of \ host_ref -> patch output" - ~in_oss_since:None ~in_product_since:rel_miami + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "Apply the selected patch to all hosts in the pool and return a \ + map of host_ref -> patch output" + ) + ; (Deprecated, rel_ely, "") + ] ~params:[(Ref _pool_patch, "self", "The patch to apply")] - ~allowed_roles:_R_POOL_OP ~internal_deprecated_since:rel_ely () + ~allowed_roles:_R_POOL_OP () let t = create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None @@ -882,16 +1069,30 @@ module Host_patch = struct ~doc: "Destroy the specified host patch, removing it from the disk. This \ does NOT reverse the patch" - ~in_oss_since:None ~in_product_since:rel_rio + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Destroy the specified host patch, removing it from the disk. 
This \ + does NOT reverse the patch" + ) + ; (Deprecated, rel_miami, "") + ] ~params:[(Ref _host_patch, "self", "The patch to destroy")] - ~internal_deprecated_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~allowed_roles:_R_POOL_OP () let apply = call ~name:"apply" ~doc:"Apply the selected patch and return its output" - ~in_oss_since:None ~in_product_since:rel_rio + ~in_oss_since:None + ~lifecycle: + [ + (Published, rel_rio, "Apply the selected patch and return its output") + ; (Deprecated, rel_miami, "") + ] ~params:[(Ref _host_patch, "self", "The patch to apply")] ~result:(String, "the output of the patch application process") - ~internal_deprecated_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~allowed_roles:_R_POOL_OP () let t = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None @@ -1062,8 +1263,14 @@ module Network = struct ) ; (Ref _host, "host", "physical machine to which this PIF is connected") ] - ~in_product_since:rel_miami ~hide_from_docs:true ~allowed_roles:_R_POOL_OP - () + ~lifecycle: + [ + ( Published + , rel_miami + , "Makes the network immediately available on a particular host" + ) + ] + ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () let purpose = Enum @@ -1134,14 +1341,29 @@ module Network = struct (* network pool introduce is used to copy network records on pool join -- it's the network analogue of VDI/PIF.pool_introduce *) let pool_introduce = - call ~name:"pool_introduce" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"pool_introduce" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Create a new network record in the database only" + ) + ] ~versioned_params:(introduce_params miami_release) ~doc:"Create a new network record in the database only" ~result:(Ref _network, "The ref of the newly created network record.") ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () let create_new_blob = - call ~name:"create_new_blob" ~in_product_since:rel_orlando + call ~name:"create_new_blob" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Create a placeholder for a named binary blob of data that is \ + associated with this pool" + ) + ] ~doc: "Create a placeholder for a named binary blob of data that is \ associated with this pool" @@ -1183,7 +1405,14 @@ module Network = struct ~allowed_roles:_R_POOL_OP () let set_default_locking_mode = - call ~name:"set_default_locking_mode" ~in_product_since:rel_tampa + call ~name:"set_default_locking_mode" + ~lifecycle: + [ + ( Published + , rel_tampa + , "Set the default locking mode for VIFs attached to this network" + ) + ] ~doc:"Set the default locking mode for VIFs attached to this network" ~params: [ @@ -1206,8 +1435,14 @@ module Network = struct ) ; (Ref _vm, "vm", "The virtual machine") ] - ~in_product_since:rel_tampa ~hide_from_docs:true - ~allowed_roles:_R_VM_POWER_ADMIN () + ~lifecycle: + [ + ( Published + , rel_tampa + , "Attaches all networks needed by a given VM on a particular host" + ) + ] + ~hide_from_docs:true ~allowed_roles:_R_VM_POWER_ADMIN () let detach_for_vm = call ~name:"detach_for_vm" @@ -1220,8 +1455,14 @@ module Network = struct ) ; (Ref _vm, "vm", "The virtual machine") ] - ~in_product_since:rel_tampa ~hide_from_docs:true - ~allowed_roles:_R_VM_POWER_ADMIN () + ~lifecycle: + [ + ( Published + , rel_tampa + , "Detaches all networks of a given VM from a particular host" + ) + ] + ~hide_from_docs:true ~allowed_roles:_R_VM_POWER_ADMIN () let add_purpose = call ~name:"add_purpose" @@ -1232,7 +1473,14 @@ module Network = struct ; (purpose, "value", "The purpose to add") 
] ~errs:[Api_errors.network_incompatible_purposes] - ~in_product_since:rel_inverness ~allowed_roles:_R_POOL_ADMIN () + ~lifecycle: + [ + ( Published + , rel_inverness + , "Give a network a new purpose (if not present already)" + ) + ] + ~allowed_roles:_R_POOL_ADMIN () let remove_purpose = call ~name:"remove_purpose" @@ -1242,7 +1490,14 @@ module Network = struct (Ref _network, "self", "The network") ; (purpose, "value", "The purpose to remove") ] - ~in_product_since:rel_inverness ~allowed_roles:_R_POOL_ADMIN () + ~lifecycle: + [ + ( Published + , rel_inverness + , "Remove a purpose from a network (if present)" + ) + ] + ~allowed_roles:_R_POOL_ADMIN () (** A virtual network *) let t = @@ -1333,7 +1588,7 @@ end module PIF = struct let create_VLAN = - call ~name:"create_VLAN" ~in_product_since:rel_rio + call ~name:"create_VLAN" ~doc: "Create a VLAN interface from an existing physical interface. This \ call is deprecated: use VLAN.create instead" @@ -1360,10 +1615,10 @@ module PIF = struct ] ~result:(Ref _pif, "The reference of the created PIF object") ~errs:[Api_errors.vlan_tag_invalid] - ~internal_deprecated_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~allowed_roles:_R_POOL_OP () let destroy = - call ~name:"destroy" ~in_product_since:rel_rio + call ~name:"destroy" ~doc: "Destroy the PIF object (provided it is a VLAN interface). This call \ is deprecated: use VLAN.destroy or Bond.destroy instead" @@ -1377,19 +1632,23 @@ module PIF = struct ] ~params:[(Ref _pif, "self", "the PIF object to destroy")] ~errs:[Api_errors.pif_is_physical] - ~internal_deprecated_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~allowed_roles:_R_POOL_OP () let plug = call ~name:"plug" ~doc:"Attempt to bring up a physical interface" ~params:[(Ref _pif, "self", "the PIF object to plug")] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP + ~lifecycle: + [(Published, rel_miami, "Attempt to bring up a physical interface")] + ~allowed_roles:_R_POOL_OP ~errs:[Api_errors.transport_pif_not_configured] () let unplug = call ~name:"unplug" ~doc:"Attempt to bring down a physical interface" ~params:[(Ref _pif, "self", "the PIF object to unplug")] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP + ~lifecycle: + [(Published, rel_miami, "Attempt to bring down a physical interface")] + ~allowed_roles:_R_POOL_OP ~errs: [ Api_errors.ha_operation_would_break_failover_plan @@ -1402,7 +1661,9 @@ module PIF = struct let set_disallow_unplug = call ~name:"set_disallow_unplug" ~doc:"Set whether unplugging the PIF is allowed" ~hide_from_docs:false - ~in_oss_since:None ~in_product_since:rel_orlando + ~in_oss_since:None + ~lifecycle: + [(Published, rel_orlando, "Set whether unplugging the PIF is allowed")] ~params: [ (Ref _pif, "self", "Reference to the object") @@ -1437,7 +1698,14 @@ module PIF = struct ; (String, "gateway", "the new gateway") ; (String, "DNS", "the new DNS settings") ] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP + ~lifecycle: + [ + ( Published + , rel_miami + , "Reconfigure the IP address settings for this interface" + ) + ] + ~allowed_roles:_R_POOL_OP ~errs:Api_errors.[clustering_enabled] () @@ -1503,7 +1771,15 @@ module PIF = struct "Scan for physical interfaces on a host and create PIF objects to \ represent them" ~params:[(Ref _host, "host", "The host on which to scan")] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~lifecycle: + [ + ( Published + , rel_miami + , "Scan for physical interfaces on a host and create PIF objects to \ + represent them" + ) + ] + ~allowed_roles:_R_POOL_OP 
() let introduce_params = [ @@ -1542,7 +1818,14 @@ module PIF = struct let introduce = call ~name:"introduce" ~doc:"Create a PIF object matching a particular network interface" - ~versioned_params:introduce_params ~in_product_since:rel_miami + ~versioned_params:introduce_params + ~lifecycle: + [ + ( Published + , rel_miami + , "Create a PIF object matching a particular network interface" + ) + ] ~result:(Ref _pif, "The reference of the created PIF object") ~allowed_roles:_R_POOL_OP () @@ -1550,7 +1833,14 @@ module PIF = struct call ~name:"forget" ~doc:"Destroy the PIF object matching a particular network interface" ~params:[(Ref _pif, "self", "The PIF object to destroy")] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP + ~lifecycle: + [ + ( Published + , rel_miami + , "Destroy the PIF object matching a particular network interface" + ) + ] + ~allowed_roles:_R_POOL_OP ~errs:Api_errors.[pif_tunnel_still_exists; clustering_enabled] () @@ -1721,21 +2011,31 @@ module PIF = struct (* PIF pool introduce is used to copy PIF records on pool join -- it's the PIF analogue of VDI.pool_introduce *) let pool_introduce = - call ~name:"pool_introduce" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"pool_introduce" ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Create a new PIF record in the database only")] ~versioned_params:(pool_introduce_params miami_release) ~doc:"Create a new PIF record in the database only" ~result:(Ref _pif, "The ref of the newly created PIF record.") ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () let db_introduce = - call ~name:"db_introduce" ~in_oss_since:None ~in_product_since:rel_orlando + call ~name:"db_introduce" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_orlando + , "Create a new PIF record in the database only" + ) + ] ~versioned_params:(pool_introduce_params orlando_release) ~doc:"Create a new PIF record in the database only" ~result:(Ref _pif, "The ref of the newly created PIF record.") ~hide_from_docs:false ~allowed_roles:_R_POOL_OP () let db_forget = - call ~name:"db_forget" ~in_oss_since:None ~in_product_since:rel_orlando + call ~name:"db_forget" ~in_oss_since:None + ~lifecycle:[(Published, rel_orlando, "Destroy a PIF database record.")] ~params: [ ( Ref _pif @@ -2051,12 +2351,14 @@ module Bond = struct } ] ~result:(Ref _bond, "The reference of the created Bond object") - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~lifecycle:[(Published, rel_miami, "Create an interface bond")] + ~allowed_roles:_R_POOL_OP () let destroy = call ~name:"destroy" ~doc:"Destroy an interface bond" ~params:[(Ref _bond, "self", "Bond to destroy")] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~lifecycle:[(Published, rel_miami, "Destroy an interface bond")] + ~allowed_roles:_R_POOL_OP () let set_mode = call ~name:"set_mode" ~doc:"Change the bond mode" @@ -2073,7 +2375,9 @@ module Bond = struct ; (String, "name", "The property name") ; (String, "value", "The property value") ] - ~in_product_since:rel_tampa ~allowed_roles:_R_POOL_OP () + ~lifecycle: + [(Published, rel_tampa, "Set the value of a property of the bond")] + ~allowed_roles:_R_POOL_OP () let t = create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None @@ -2162,7 +2466,13 @@ module VLAN = struct (* vlan pool introduce is used to copy management vlan record on pool join -- it's the vlan analogue of VDI/PIF.pool_introduce *) let pool_introduce = call ~name:"pool_introduce" ~in_oss_since:None - ~in_product_since:rel_inverness + ~lifecycle: + [ + ( 
Published + , rel_inverness + , "Create a new vlan record in the database only" + ) + ] ~versioned_params:(introduce_params inverness_release) ~doc:"Create a new vlan record in the database only" ~result:(Ref _vlan, "The reference of the created VLAN object") @@ -2177,12 +2487,14 @@ module VLAN = struct ; (Ref _network, "network", "Network to receive the untagged traffic") ] ~result:(Ref _vlan, "The reference of the created VLAN object") - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~lifecycle:[(Published, rel_miami, "Create a VLAN mux/demuxer")] + ~allowed_roles:_R_POOL_OP () let destroy = call ~name:"destroy" ~doc:"Destroy a VLAN mux/demuxer" ~params:[(Ref _vlan, "self", "VLAN mux/demuxer to destroy")] - ~in_product_since:rel_miami ~allowed_roles:_R_POOL_OP () + ~lifecycle:[(Published, rel_miami, "Destroy a VLAN mux/demuxer")] + ~allowed_roles:_R_POOL_OP () let t = create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None @@ -2304,7 +2616,15 @@ end module PBD = struct let plug = - call ~name:"plug" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"plug" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Activate the specified PBD, causing the referenced SR to be \ + attached and scanned" + ) + ] ~doc: "Activate the specified PBD, causing the referenced SR to be attached \ and scanned" @@ -2313,7 +2633,15 @@ module PBD = struct ~allowed_roles:_R_POOL_OP () let unplug = - call ~name:"unplug" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"unplug" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Deactivate the specified PBD, causing the referenced SR to be \ + detached and nolonger scanned" + ) + ] ~doc: "Deactivate the specified PBD, causing the referenced SR to be \ detached and nolonger scanned" @@ -2322,7 +2650,7 @@ module PBD = struct let set_device_config = call ~name:"set_device_config" ~in_oss_since:None - ~in_product_since:rel_miami + ~lifecycle:[(Published, rel_miami, "Sets the PBD's device_config field")] ~params: [ (Ref _pbd, "self", "The PBD to modify") @@ -2416,14 +2744,30 @@ module VIF = struct ) let plug = - call ~name:"plug" ~in_product_since:rel_rio + call ~name:"plug" + ~lifecycle: + [ + ( Published + , rel_rio + , "Hotplug the specified VIF, dynamically attaching it to the \ + running VM" + ) + ] ~doc: "Hotplug the specified VIF, dynamically attaching it to the running VM" ~params:[(Ref _vif, "self", "The VIF to hotplug")] ~allowed_roles:_R_VM_ADMIN () let unplug = - call ~name:"unplug" ~in_product_since:rel_rio + call ~name:"unplug" + ~lifecycle: + [ + ( Published + , rel_rio + , "Hot-unplug the specified VIF, dynamically unattaching it from the \ + running VM" + ) + ] ~doc: "Hot-unplug the specified VIF, dynamically unattaching it from the \ running VM" @@ -2431,13 +2775,22 @@ module VIF = struct ~allowed_roles:_R_VM_ADMIN () let unplug_force = - call ~name:"unplug_force" ~in_product_since:rel_boston + call ~name:"unplug_force" + ~lifecycle:[(Published, rel_boston, "Forcibly unplug the specified VIF")] ~doc:"Forcibly unplug the specified VIF" ~params:[(Ref _vif, "self", "The VIF to forcibly unplug")] ~allowed_roles:_R_VM_ADMIN () let move = - call ~name:"move" ~in_product_since:rel_ely + call ~name:"move" + ~lifecycle: + [ + ( Published + , rel_ely + , "Move the specified VIF to the specified network, even while the \ + VM is running" + ) + ] ~doc: "Move the specified VIF to the specified network, even while the VM is \ running" @@ -2475,7 +2828,8 @@ module VIF = struct ) let 
set_locking_mode = - call ~name:"set_locking_mode" ~in_product_since:rel_tampa + call ~name:"set_locking_mode" + ~lifecycle:[(Published, rel_tampa, "Set the locking mode for this VIF")] ~doc:"Set the locking mode for this VIF" ~params: [ @@ -2485,7 +2839,15 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let set_ipv4_allowed = - call ~name:"set_ipv4_allowed" ~in_product_since:rel_tampa + call ~name:"set_ipv4_allowed" + ~lifecycle: + [ + ( Published + , rel_tampa + , "Set the IPv4 addresses to which traffic on this VIF can be \ + restricted" + ) + ] ~doc: "Set the IPv4 addresses to which traffic on this VIF can be restricted" ~params: @@ -2502,7 +2864,9 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let add_ipv4_allowed = - call ~name:"add_ipv4_allowed" ~in_product_since:rel_tampa + call ~name:"add_ipv4_allowed" + ~lifecycle: + [(Published, rel_tampa, "Associates an IPv4 address with this VIF")] ~doc:"Associates an IPv4 address with this VIF" ~params: [ @@ -2518,7 +2882,9 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let remove_ipv4_allowed = - call ~name:"remove_ipv4_allowed" ~in_product_since:rel_tampa + call ~name:"remove_ipv4_allowed" + ~lifecycle: + [(Published, rel_tampa, "Removes an IPv4 address from this VIF")] ~doc:"Removes an IPv4 address from this VIF" ~params: [ @@ -2528,7 +2894,15 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let set_ipv6_allowed = - call ~name:"set_ipv6_allowed" ~in_product_since:rel_tampa + call ~name:"set_ipv6_allowed" + ~lifecycle: + [ + ( Published + , rel_tampa + , "Set the IPv6 addresses to which traffic on this VIF can be \ + restricted" + ) + ] ~doc: "Set the IPv6 addresses to which traffic on this VIF can be restricted" ~params: @@ -2545,7 +2919,9 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let add_ipv6_allowed = - call ~name:"add_ipv6_allowed" ~in_product_since:rel_tampa + call ~name:"add_ipv6_allowed" + ~lifecycle: + [(Published, rel_tampa, "Associates an IPv6 address with this VIF")] ~doc:"Associates an IPv6 address with this VIF" ~params: [ @@ -2561,7 +2937,9 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let remove_ipv6_allowed = - call ~name:"remove_ipv6_allowed" ~in_product_since:rel_tampa + call ~name:"remove_ipv6_allowed" + ~lifecycle: + [(Published, rel_tampa, "Removes an IPv6 address from this VIF")] ~doc:"Removes an IPv6 address from this VIF" ~params: [ @@ -2571,7 +2949,14 @@ module VIF = struct ~allowed_roles:_R_POOL_OP () let configure_ipv4 = - call ~name:"configure_ipv4" ~in_product_since:rel_dundee + call ~name:"configure_ipv4" + ~lifecycle: + [ + ( Published + , rel_dundee + , "Configure IPv4 settings for this virtual interface" + ) + ] ~doc:"Configure IPv4 settings for this virtual interface" ~versioned_params: [ @@ -2611,7 +2996,14 @@ module VIF = struct ~allowed_roles:_R_VM_OP () let configure_ipv6 = - call ~name:"configure_ipv6" ~in_product_since:rel_dundee + call ~name:"configure_ipv6" + ~lifecycle: + [ + ( Published + , rel_dundee + , "Configure IPv6 settings for this virtual interface" + ) + ] ~doc:"Configure IPv6 settings for this virtual interface" ~versioned_params: [ @@ -3020,7 +3412,16 @@ module SR = struct } let create = - call ~name:"create" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"create" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Create a new Storage Repository and introduce it into the managed \ + system, creating both SR record and PBD record to attach it to \ + current host (with specified device_config parameters)" + ) + ] 
~versioned_params: (host_param :: dev_config_param @@ -3038,7 +3439,17 @@ module SR = struct let destroy_self_param = (Ref _sr, "sr", "The SR to destroy") let destroy = - call ~name:"destroy" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"destroy" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Destroy specified SR, removing SR-record from database and remove \ + SR from disk. (In order to affect this operation the appropriate \ + device_config is read from the specified SR's PBD on current \ + host)" + ) + ] ~doc: "Destroy specified SR, removing SR-record from database and remove SR \ from disk. (In order to affect this operation the appropriate \ @@ -3047,7 +3458,15 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let forget = - call ~name:"forget" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"forget" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Removing specified SR-record from database, without attempting to \ + remove SR from disk" + ) + ] ~doc: "Removing specified SR-record from database, without attempting to \ remove SR from disk" @@ -3055,7 +3474,14 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let introduce = - call ~name:"introduce" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"introduce" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Introduce a new Storage Repository into the managed system" + ) + ] ~versioned_params: ({ param_type= String @@ -3072,7 +3498,19 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let probe = - call ~name:"probe" ~in_oss_since:None ~in_product_since:rel_miami + call ~name:"probe" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "Perform a backend-specific scan, using the given device_config. \ + If the device_config is complete, then this will return a list of \ + the SRs present of this type on the device, if any. If the \ + device_config is partial, then a backend-specific scan will be \ + performed, returning results that will guide the user in \ + improving the device_config." + ) + ] ~versioned_params: [ host_param @@ -3130,8 +3568,7 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let make = - call ~name:"make" ~in_oss_since:None ~in_product_since:rel_rio - ~internal_deprecated_since:rel_miami + call ~name:"make" ~in_oss_since:None ~lifecycle: [ (Published, rel_rio, "Create a new Storage Repository on disk") @@ -3150,27 +3587,44 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let get_supported_types = - call ~name:"get_supported_types" ~in_product_since:rel_rio ~flags:[`Session] + call ~name:"get_supported_types" + ~lifecycle: + [ + ( Published + , rel_rio + , "Return a set of all the SR types supported by the system" + ) + ] + ~flags:[`Session] ~doc:"Return a set of all the SR types supported by the system" ~params:[] ~result:(Set String, "the supported SR types") ~allowed_roles:_R_READ_ONLY () let scan = - call ~name:"scan" ~in_product_since:rel_rio + call ~name:"scan" + ~lifecycle: + [ + ( Published + , rel_rio + , "Refreshes the list of VDIs associated with an SR" + ) + ] ~doc:"Refreshes the list of VDIs associated with an SR" ~params:[(Ref _sr, "sr", "The SR to scan")] ~allowed_roles:_R_VM_POWER_ADMIN () (* Nb, although this is a new explicit call, it's actually been in the API since rio - just autogenerated. So no setting of rel_miami. 
*) let set_shared = - call ~name:"set_shared" ~in_product_since:rel_rio + call ~name:"set_shared" + ~lifecycle:[(Published, rel_rio, "Sets the shared flag on the SR")] ~doc:"Sets the shared flag on the SR" ~params: [(Ref _sr, "sr", "The SR"); (Bool, "value", "True if the SR is shared")] ~allowed_roles:_R_POOL_OP () let set_name_label = - call ~name:"set_name_label" ~in_product_since:rel_rio + call ~name:"set_name_label" + ~lifecycle:[(Published, rel_rio, "Set the name label of the SR")] ~doc:"Set the name label of the SR" ~params: [ @@ -3180,7 +3634,8 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let set_name_description = - call ~name:"set_name_description" ~in_product_since:rel_rio + call ~name:"set_name_description" + ~lifecycle:[(Published, rel_rio, "Set the name description of the SR")] ~doc:"Set the name description of the SR" ~params: [ @@ -3190,7 +3645,15 @@ module SR = struct ~allowed_roles:_R_POOL_OP () let create_new_blob = - call ~name:"create_new_blob" ~in_product_since:rel_orlando + call ~name:"create_new_blob" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Create a placeholder for a named binary blob of data that is \ + associated with this SR" + ) + ] ~doc: "Create a placeholder for a named binary blob of data that is \ associated with this SR" @@ -3233,14 +3696,16 @@ module SR = struct let get_data_sources = call ~name:"get_data_sources" ~in_oss_since:None - ~in_product_since:rel_dundee ~doc:"" + ~lifecycle:[(Published, rel_dundee, "")] + ~doc:"" ~result:(Set (Record _data_source), "A set of data sources") ~params:[(Ref _sr, "sr", "The SR to interrogate")] ~errs:[] ~flags:[`Session] ~allowed_roles:_R_READ_ONLY () let record_data_source = call ~name:"record_data_source" ~in_oss_since:None - ~in_product_since:rel_dundee + ~lifecycle: + [(Published, rel_dundee, "Start recording the specified data source")] ~doc:"Start recording the specified data source" ~params: [ @@ -3251,7 +3716,13 @@ module SR = struct let query_data_source = call ~name:"query_data_source" ~in_oss_since:None - ~in_product_since:rel_dundee + ~lifecycle: + [ + ( Published + , rel_dundee + , "Query the latest value of the specified data source" + ) + ] ~doc:"Query the latest value of the specified data source" ~params: [ @@ -3263,7 +3734,14 @@ module SR = struct let forget_data_source_archives = call ~name:"forget_data_source_archives" ~in_oss_since:None - ~in_product_since:rel_dundee + ~lifecycle: + [ + ( Published + , rel_dundee + , "Forget the recorded statistics related to the specified data \ + source" + ) + ] ~doc:"Forget the recorded statistics related to the specified data source" ~params: [ @@ -3277,7 +3755,8 @@ module SR = struct let set_virtual_allocation = call ~name:"set_virtual_allocation" ~in_oss_since:None - ~in_product_since:rel_miami + ~lifecycle: + [(Published, rel_miami, "Sets the SR's virtual_allocation field")] ~params: [ (Ref _sr, "self", "The SR to modify") @@ -3288,7 +3767,7 @@ module SR = struct let set_physical_size = call ~name:"set_physical_size" ~in_oss_since:None - ~in_product_since:rel_miami + ~lifecycle:[(Published, rel_miami, "Sets the SR's physical_size field")] ~params: [ (Ref _sr, "self", "The SR to modify") @@ -3299,7 +3778,9 @@ module SR = struct let set_physical_utilisation = call ~name:"set_physical_utilisation" ~in_oss_since:None - ~in_product_since:rel_miami ~flags:[`Session] + ~lifecycle: + [(Published, rel_miami, "Sets the SR's physical_utilisation field")] + ~flags:[`Session] ~params: [ (Ref _sr, "self", "The SR to modify") @@ -3309,13 +3790,21 @@ 
module SR = struct ~allowed_roles:_R_LOCAL_ROOT_ONLY () let update = - call ~name:"update" ~in_oss_since:None ~in_product_since:rel_symc + call ~name:"update" ~in_oss_since:None + ~lifecycle:[(Published, rel_symc, "Refresh the fields on the SR object")] ~params:[(Ref _sr, "sr", "The SR whose fields should be refreshed")] ~doc:"Refresh the fields on the SR object" ~allowed_roles:_R_POOL_OP () let assert_can_host_ha_statefile = call ~name:"assert_can_host_ha_statefile" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Returns successfully if the given SR can host an HA statefile. \ + Otherwise returns an error to explain why not" + ) + ] ~params:[(Ref _sr, "sr", "The SR to query")] ~doc: "Returns successfully if the given SR can host an HA statefile. \ @@ -3324,7 +3813,14 @@ module SR = struct let assert_supports_database_replication = call ~name:"assert_supports_database_replication" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle: + [ + ( Published + , rel_boston + , "Returns successfully if the given SR supports database \ + replication. Otherwise returns an error to explain why not." + ) + ] ~params:[(Ref _sr, "sr", "The SR to query")] ~doc: "Returns successfully if the given SR supports database replication. \ @@ -3333,13 +3829,13 @@ module SR = struct let enable_database_replication = call ~name:"enable_database_replication" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle:[(Published, rel_boston, "")] ~params:[(Ref _sr, "sr", "The SR to which metadata should be replicated")] ~allowed_roles:_R_POOL_OP () let disable_database_replication = call ~name:"disable_database_replication" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle:[(Published, rel_boston, "")] ~params: [ ( Ref _sr @@ -3351,7 +3847,8 @@ module SR = struct let get_live_hosts = call ~in_oss_since:None ~name:"get_live_hosts" - ~in_product_since:rel_stockholm + ~lifecycle: + [(Published, rel_stockholm, "Get all live hosts attached to this SR")] ~doc:"Get all live hosts attached to this SR" ~params:[(Ref _sr, "sr", "The SR from which to query attached hosts")] ~allowed_roles:_R_POOL_OP ~hide_from_docs:true @@ -3469,7 +3966,8 @@ module SM = struct (** XXX: just make this a field and be done with it. Cowardly refusing to change the schema for now. *) let get_driver_filename = call ~name:"get_driver_filename" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [(Published, rel_orlando, "Gets the SM's driver_filename field")] ~params:[(Ref _sm, "self", "The SM to query")] ~result:(String, "The SM's driver_filename field") ~doc:"Gets the SM's driver_filename field" () @@ -3531,7 +4029,17 @@ end module LVHD = struct let enable_thin_provisioning = call ~name:"enable_thin_provisioning" ~in_oss_since:None - ~in_product_since:rel_dundee ~allowed_roles:_R_POOL_ADMIN + ~lifecycle: + [ + ( Published + , rel_dundee + , "Upgrades an LVHD SR to enable thin-provisioning. Future VDIs \ + created in this SR will be thinly-provisioned, although existing \ + VDIs will be left alone. Note that the SR must be attached to the \ + SRmaster for upgrade to work." + ) + ] + ~allowed_roles:_R_POOL_ADMIN ~params: [ ( Ref _host @@ -3644,7 +4152,18 @@ module VDI = struct ) let snapshot = - call ~name:"snapshot" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"snapshot" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Take a read-only snapshot of the VDI, returning a reference to \ + the snapshot. 
If any driver_params are specified then these are \ + passed through to the storage-specific substrate driver that \ + takes the snapshot. NB the snapshot lives in the same Storage \ + Repository as its parent." + ) + ] ~versioned_params: [ { @@ -3675,7 +4194,18 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN ~doc_tags:[Snapshots] () let clone = - call ~name:"clone" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"clone" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Take an exact copy of the VDI and return a reference to the new \ + disk. If any driver_params are specified then these are passed \ + through to the storage-specific substrate driver that implements \ + the clone operation. NB the clone lives in the same Storage \ + Repository as its parent." + ) + ] ~params:[(Ref _vdi, "vdi", "The VDI to clone")] ~versioned_params: [ @@ -3706,7 +4236,9 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN ~doc_tags:[Snapshots] () let resize = - call ~name:"resize" ~in_product_since:rel_rio ~in_oss_since:None + call ~name:"resize" + ~lifecycle:[(Published, rel_rio, "Resize the VDI.")] + ~in_oss_since:None ~params: [ (Ref _vdi, "vdi", "The VDI to resize") @@ -3805,7 +4337,15 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN () let pool_migrate = - call ~name:"pool_migrate" ~in_oss_since:None ~in_product_since:rel_tampa + call ~name:"pool_migrate" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_tampa + , "Migrate a VDI, which may be attached to a running guest, to a \ + different SR. The destination SR must be visible to the guest." + ) + ] ~params: [ (Ref _vdi, "vdi", "The VDI to migrate") @@ -3952,7 +4492,9 @@ module VDI = struct (* This used to be called VDI.introduce but it was always an internal call *) let pool_introduce = - call ~name:"pool_introduce" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"pool_introduce" ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Create a new VDI record in the database only")] ~versioned_params: (introduce_params miami_release @ [ @@ -3981,7 +4523,9 @@ module VDI = struct call ~name:"db_forget" ~in_oss_since:None ~params:[(Ref _vdi, "vdi", "The VDI to forget about")] ~doc:"Removes a VDI record from the database" ~hide_from_docs:true - ~in_product_since:rel_miami ~allowed_roles:_R_LOCAL_ROOT_ONLY () + ~lifecycle: + [(Published, rel_miami, "Removes a VDI record from the database")] + ~allowed_roles:_R_LOCAL_ROOT_ONLY () let introduce = call ~name:"introduce" ~in_oss_since:None @@ -3989,17 +4533,29 @@ module VDI = struct ~doc:"Create a new VDI record in the database only" ~result:(Ref _vdi, "The ref of the newly created VDI record.") ~errs:[Api_errors.sr_operation_not_supported] - ~in_product_since:rel_miami ~allowed_roles:_R_VM_ADMIN () + ~lifecycle: + [(Published, rel_miami, "Create a new VDI record in the database only")] + ~allowed_roles:_R_VM_ADMIN () let forget = - call ~name:"forget" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"forget" ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Removes a VDI record from the database")] ~params:[(Ref _vdi, "vdi", "The VDI to forget about")] ~doc:"Removes a VDI record from the database" ~allowed_roles:_R_VM_ADMIN () let force_unlock = - call ~name:"force_unlock" ~in_oss_since:None ~in_product_since:rel_rio - ~internal_deprecated_since:rel_miami + call ~name:"force_unlock" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Steals the lock on this VDI and leaves it unlocked. This function \ + is extremely dangerous. 
This call is deprecated." + ) + ; (Deprecated, rel_miami, "") + ] ~params:[(Ref _vdi, "vdi", "The VDI to forcibly unlock")] ~doc: "Steals the lock on this VDI and leaves it unlocked. This function is \ @@ -4012,7 +4568,14 @@ module VDI = struct [(Ref _vdi, "vdi", "The VDI whose stats (eg size) should be updated")] ~doc:"Ask the storage backend to refresh the fields in the VDI object" ~errs:[Api_errors.sr_operation_not_supported] - ~in_product_since:rel_symc ~allowed_roles:_R_VM_ADMIN () + ~lifecycle: + [ + ( Published + , rel_symc + , "Ask the storage backend to refresh the fields in the VDI object" + ) + ] + ~allowed_roles:_R_VM_ADMIN () let operations = Enum @@ -4041,7 +4604,8 @@ module VDI = struct ) let set_missing = - call ~name:"set_missing" ~in_oss_since:None ~in_product_since:rel_miami + call ~name:"set_missing" ~in_oss_since:None + ~lifecycle:[(Published, rel_miami, "Sets the VDI's missing field")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4051,7 +4615,8 @@ module VDI = struct ~allowed_roles:_R_LOCAL_ROOT_ONLY () let set_read_only = - call ~name:"set_read_only" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"set_read_only" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Sets the VDI's read_only field")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4061,7 +4626,8 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN () let set_sharable = - call ~name:"set_sharable" ~in_oss_since:None ~in_product_since:rel_george + call ~name:"set_sharable" ~in_oss_since:None + ~lifecycle:[(Published, rel_george, "Sets the VDI's sharable field")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4071,7 +4637,8 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN () let set_managed = - call ~name:"set_managed" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"set_managed" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Sets the VDI's managed field")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4081,7 +4648,8 @@ module VDI = struct ~allowed_roles:_R_LOCAL_ROOT_ONLY () let set_virtual_size = - call ~name:"set_virtual_size" ~in_oss_since:None ~in_product_since:rel_miami + call ~name:"set_virtual_size" ~in_oss_since:None + ~lifecycle:[(Published, rel_miami, "Sets the VDI's virtual_size field")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4092,7 +4660,8 @@ module VDI = struct let set_physical_utilisation = call ~name:"set_physical_utilisation" ~in_oss_since:None - ~in_product_since:rel_miami + ~lifecycle: + [(Published, rel_miami, "Sets the VDI's physical_utilisation field")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4103,7 +4672,8 @@ module VDI = struct let set_is_a_snapshot = call ~name:"set_is_a_snapshot" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle: + [(Published, rel_boston, "Sets whether this VDI is a snapshot")] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4116,7 +4686,11 @@ module VDI = struct ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () let set_snapshot_of = - call ~name:"set_snapshot_of" ~in_oss_since:None ~in_product_since:rel_boston + call ~name:"set_snapshot_of" ~in_oss_since:None + ~lifecycle: + [ + (Published, rel_boston, "Sets the VDI of which this VDI is a snapshot") + ] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4127,7 +4701,8 @@ module VDI = struct let set_snapshot_time = call ~name:"set_snapshot_time" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle: + [(Published, rel_boston, "Sets the snapshot time of this VDI.")] ~params: [ (Ref 
_vdi, "self", "The VDI to modify") @@ -4142,7 +4717,13 @@ module VDI = struct let set_metadata_of_pool = call ~name:"set_metadata_of_pool" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle: + [ + ( Published + , rel_boston + , "Records the pool whose metadata is contained by this VDI." + ) + ] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4158,7 +4739,8 @@ module VDI = struct (** An API call for debugging and testing only *) let generate_config = call ~name:"generate_config" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [(Published, rel_orlando, "Internal function for debugging only")] ~params: [ (Ref _host, "host", "The host on which to generate the configuration") @@ -4181,7 +4763,15 @@ module VDI = struct ) let set_on_boot = - call ~name:"set_on_boot" ~in_oss_since:None ~in_product_since:rel_cowley + call ~name:"set_on_boot" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_cowley + , "Set the value of the on_boot parameter. This value can only be \ + changed when the VDI is not attached to a running VM." + ) + ] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4194,7 +4784,18 @@ module VDI = struct let set_allow_caching = call ~name:"set_allow_caching" ~in_oss_since:None - ~in_product_since:rel_cowley + ~lifecycle: + [ + ( Published + , rel_cowley + , "Set the value of the allow_caching parameter. This value can only \ + be changed when the VDI is not attached to a running VM. The \ + caching behaviour is only affected by this flag for VHD-based \ + VDIs that have one parent and no child VHDs. Moreover, caching \ + only takes place when the host running the VM containing this VDI \ + has a nominated SR for local caching." + ) + ] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4210,7 +4811,15 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN () let set_name_label = - call ~name:"set_name_label" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"set_name_label" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Set the name label of the VDI. This can only happen when then its \ + SR is currently attached." + ) + ] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4223,7 +4832,14 @@ module VDI = struct let set_name_description = call ~name:"set_name_description" ~in_oss_since:None - ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "Set the name description of the VDI. This can only happen when \ + its SR is currently attached." + ) + ] ~params: [ (Ref _vdi, "self", "The VDI to modify") @@ -4235,7 +4851,15 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN () let open_database = - call ~name:"open_database" ~in_oss_since:None ~in_product_since:rel_boston + call ~name:"open_database" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_boston + , "Load the metadata found on the supplied VDI and return a session \ + reference which can be used in API calls to query its contents." 
+ ) + ] ~params: [(Ref _vdi, "self", "The VDI which contains the database to open")] ~result:(Ref _session, "A session which can be used to query the database") @@ -4245,7 +4869,14 @@ module VDI = struct ~allowed_roles:_R_POOL_OP () let checksum = - call ~name:"checksum" ~in_oss_since:None ~in_product_since:rel_boston + call ~name:"checksum" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_boston + , "Internal function to calculate VDI checksum and return a string" + ) + ] ~params:[(Ref _vdi, "self", "The VDI to checksum")] ~result:(String, "The md5sum of the vdi") ~doc:"Internal function to calculate VDI checksum and return a string" @@ -4258,14 +4889,29 @@ module VDI = struct let read_database_pool_uuid = call ~name:"read_database_pool_uuid" ~in_oss_since:None - ~in_product_since:rel_boston + ~lifecycle: + [ + ( Published + , rel_boston + , "Check the VDI cache for the pool UUID of the database on this VDI." + ) + ] ~params:[(Ref _vdi, "self", "The metadata VDI to look up in the cache.")] ~result:(String, "The cached pool UUID of the database on the VDI.") ~doc:"Check the VDI cache for the pool UUID of the database on this VDI." ~allowed_roles:_R_READ_ONLY () let enable_cbt = - call ~name:"enable_cbt" ~in_oss_since:None ~in_product_since:rel_inverness + call ~name:"enable_cbt" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_inverness + , "Enable changed block tracking for the VDI. This call is \ + idempotent - enabling CBT for a VDI for which CBT is already \ + enabled results in a no-op, and no error will be thrown." + ) + ] ~params:[(Ref _vdi, "self", "The VDI for which CBT should be enabled")] ~errs: [ @@ -4284,7 +4930,17 @@ module VDI = struct ~allowed_roles:_R_VM_ADMIN () let disable_cbt = - call ~name:"disable_cbt" ~in_oss_since:None ~in_product_since:rel_inverness + call ~name:"disable_cbt" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_inverness + , "Disable changed block tracking for the VDI. This call is only \ + allowed on VDIs that support enabling CBT. It is an idempotent \ + operation - disabling CBT for a VDI for which CBT is not enabled \ + results in a no-op, and no error will be thrown." + ) + ] ~params:[(Ref _vdi, "self", "The VDI for which CBT should be disabled")] ~errs: [ @@ -4306,7 +4962,7 @@ module VDI = struct (** This command is for internal use by SM to set the cbt_enabled field when it needs to disable cbt for its own reasons. This command should be removed once SMAPIv3 is implemented *) let set_cbt_enabled = call ~name:"set_cbt_enabled" ~in_oss_since:None - ~in_product_since:rel_inverness + ~lifecycle:[(Published, rel_inverness, "")] ~params: [ ( Ref _vdi @@ -4318,7 +4974,18 @@ module VDI = struct ~errs:[] ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () let data_destroy = - call ~name:"data_destroy" ~in_oss_since:None ~in_product_since:rel_inverness + call ~name:"data_destroy" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_inverness + , "Delete the data of the snapshot VDI, but keep its changed block \ + tracking metadata. When successful, this call changes the type of \ + the VDI to cbt_metadata. This operation is idempotent: calling it \ + on a VDI of type cbt_metadata results in a no-op, and no error \ + will be thrown." 
+ ) + ] ~params:[(Ref _vdi, "self", "The VDI whose data should be deleted.")] ~errs: [ @@ -4342,7 +5009,15 @@ module VDI = struct let list_changed_blocks = call ~name:"list_changed_blocks" ~in_oss_since:None - ~in_product_since:rel_inverness + ~lifecycle: + [ + ( Published + , rel_inverness + , "Compare two VDIs in 64k block increments and report which blocks \ + differ. This operation is not allowed when vdi_to is attached to \ + a VM." + ) + ] ~params: [ (Ref _vdi, "vdi_from", "The first VDI.") @@ -4368,7 +5043,25 @@ module VDI = struct ~allowed_roles:_R_VM_OP () let get_nbd_info = - call ~name:"get_nbd_info" ~in_oss_since:None ~in_product_since:rel_inverness + call ~name:"get_nbd_info" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_inverness + , "Get details specifying how to access this VDI via a Network Block \ + Device server. For each of a set of NBD server addresses on which \ + the VDI is available, the return value set contains a \ + vdi_nbd_server_info object that contains an exportname to request \ + once the NBD connection is established, and connection details \ + for the address. An empty list is returned if there is no network \ + that has a PIF on a host with access to the relevant SR, or if no \ + such network has been assigned an NBD-related purpose in its \ + purpose field. To access the given VDI, any of the \ + vdi_nbd_server_info objects can be used to make a connection to a \ + server, and then the VDI will be available by requesting the \ + exportname." + ) + ] ~params: [ ( Ref _vdi @@ -4594,14 +5287,22 @@ module VBD = struct ) let eject = - call ~name:"eject" ~in_product_since:rel_rio + call ~name:"eject" + ~lifecycle: + [ + ( Published + , rel_rio + , "Remove the media from the device and leave it empty" + ) + ] ~doc:"Remove the media from the device and leave it empty" ~params:[(Ref _vbd, "vbd", "The vbd representing the CDROM-like device")] ~errs:[Api_errors.vbd_not_removable_media; Api_errors.vbd_is_empty] ~allowed_roles:_R_VM_OP () let insert = - call ~name:"insert" ~in_product_since:rel_rio + call ~name:"insert" + ~lifecycle:[(Published, rel_rio, "Insert new media into the device")] ~doc:"Insert new media into the device" ~params: [ @@ -4612,14 +5313,30 @@ module VBD = struct ~allowed_roles:_R_VM_OP () let plug = - call ~name:"plug" ~in_product_since:rel_rio + call ~name:"plug" + ~lifecycle: + [ + ( Published + , rel_rio + , "Hotplug the specified VBD, dynamically attaching it to the \ + running VM" + ) + ] ~doc: "Hotplug the specified VBD, dynamically attaching it to the running VM" ~params:[(Ref _vbd, "self", "The VBD to hotplug")] ~allowed_roles:_R_VM_ADMIN () let unplug = - call ~name:"unplug" ~in_product_since:rel_rio + call ~name:"unplug" + ~lifecycle: + [ + ( Published + , rel_rio + , "Hot-unplug the specified VBD, dynamically unattaching it from the \ + running VM" + ) + ] ~doc: "Hot-unplug the specified VBD, dynamically unattaching it from the \ running VM" @@ -4629,7 +5346,8 @@ module VBD = struct ~allowed_roles:_R_VM_ADMIN () let unplug_force = - call ~name:"unplug_force" ~in_product_since:rel_rio + call ~name:"unplug_force" + ~lifecycle:[(Published, rel_rio, "Forcibly unplug the specified VBD")] ~doc:"Forcibly unplug the specified VBD" ~params:[(Ref _vbd, "self", "The VBD to forcibly unplug")] ~allowed_roles:_R_VM_ADMIN () @@ -4650,8 +5368,20 @@ module VBD = struct if the device supports surprise-remove)" ) ] - ~internal_deprecated_since:rel_ely ~hide_from_docs:true - ~in_product_since:rel_symc ~allowed_roles:_R_VM_ADMIN () + 
~hide_from_docs:true + ~lifecycle: + [ + ( Published + , rel_symc + , "Deprecated: use 'unplug_force' instead. Forcibly unplug the \ + specified VBD without any safety checks. This is an extremely \ + dangerous operation in the general case that can cause guest \ + crashes and data corruption; it should be called with extreme \ + caution. Functionally equivalent with 'unplug_force'." + ) + ; (Deprecated, rel_ely, "") + ] + ~allowed_roles:_R_VM_ADMIN () let pause = call ~name:"pause" @@ -4659,7 +5389,15 @@ module VBD = struct "Stop the backend device servicing requests so that an operation can \ be performed on the disk (eg live resize, snapshot)" ~params:[(Ref _vbd, "self", "The VBD to pause")] - ~hide_from_docs:true ~in_product_since:rel_symc + ~hide_from_docs:true + ~lifecycle: + [ + ( Published + , rel_symc + , "Stop the backend device servicing requests so that an operation \ + can be performed on the disk (eg live resize, snapshot)" + ) + ] ~result: ( String , "Token to uniquely identify this pause instance, used to match the \ @@ -4690,11 +5428,27 @@ module VBD = struct ; param_default= Some (VString "") } ] - ~hide_from_docs:true ~in_product_since:rel_symc ~allowed_roles:_R_VM_ADMIN - () + ~hide_from_docs:true + ~lifecycle: + [ + ( Published + , rel_symc + , "Restart the backend device after it was paused while an operation \ + was performed on the disk (eg live resize, snapshot)" + ) + ] + ~allowed_roles:_R_VM_ADMIN () let assert_attachable = - call ~name:"assert_attachable" ~in_product_since:rel_rio + call ~name:"assert_attachable" + ~lifecycle: + [ + ( Published + , rel_rio + , "Throws an error if this VBD could not be attached to this VM if \ + the VM were running. Intended for debugging." + ) + ] ~doc: "Throws an error if this VBD could not be attached to this VM if the \ VM were running. Intended for debugging." @@ -4702,7 +5456,15 @@ module VBD = struct ~in_oss_since:None ~allowed_roles:_R_VM_ADMIN () let set_mode = - call ~name:"set_mode" ~in_product_since:rel_rio + call ~name:"set_mode" + ~lifecycle: + [ + ( Published + , rel_rio + , "Sets the mode of the VBD. The power_state of the VM must be \ + halted." + ) + ] ~doc:"Sets the mode of the VBD. The power_state of the VM must be halted." ~params: [ @@ -4837,7 +5599,8 @@ end module Crashdump = struct let destroy = - call ~name:"destroy" ~in_product_since:rel_rio + call ~name:"destroy" + ~lifecycle:[(Published, rel_rio, "Destroy the specified crashdump")] ~doc:"Destroy the specified crashdump" ~params:[(Ref _crashdump, "self", "The crashdump to destroy")] ~allowed_roles:_R_POOL_OP () @@ -4867,7 +5630,15 @@ module Auth = struct (** Auth class *) let get_subject_identifier = call ~flags:[`Session] ~name:"get_subject_identifier" ~in_oss_since:None - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "This call queries the external directory service to obtain the \ + subject_identifier as a string from the human-readable \ + subject_name" + ) + ] ~params: [ (*Ref _auth, "auth", "???";*) @@ -4887,7 +5658,16 @@ module Auth = struct let get_subject_information_from_identifier = call ~flags:[`Session] ~name:"get_subject_information_from_identifier" - ~in_oss_since:None ~in_product_since:rel_george + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "This call queries the external directory service to obtain the \ + user information (e.g. 
username, organization etc) from the \ + specified subject_identifier" + ) + ] ~params: [ ( String @@ -4908,7 +5688,15 @@ module Auth = struct let get_group_membership = call ~flags:[`Session] ~name:"get_group_membership" ~in_oss_since:None - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "This calls queries the external directory service to obtain the \ + transitively-closed set of groups that the the subject_identifier \ + is member of." + ) + ] ~params: [ ( String @@ -4947,7 +5735,13 @@ module Subject = struct (** Subject class *) let add_to_roles = call ~flags:[`Session] ~name:"add_to_roles" ~in_oss_since:None - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call adds a new role to a subject" + ) + ] ~params: [ (Ref _subject, "self", "The subject who we want to add the role to") @@ -4958,7 +5752,13 @@ module Subject = struct let remove_from_roles = call ~flags:[`Session] ~name:"remove_from_roles" ~in_oss_since:None - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call removes a role from a subject" + ) + ] ~params: [ ( Ref _subject @@ -4975,7 +5775,13 @@ module Subject = struct let get_permissions_name_label = call ~flags:[`Session] ~name:"get_permissions_name_label" ~in_oss_since:None - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call returns a list of permission names given a subject" + ) + ] ~params: [ ( Ref _subject @@ -5020,7 +5826,13 @@ module Role = struct (** Role class *) let get_permissions = call ~flags:[`Session] ~name:"get_permissions" ~in_oss_since:None - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call returns a list of permissions given a role" + ) + ] ~params:[(Ref _role, "self", "a reference to a role")] ~result:(Set (Ref _role), "a list of permissions") ~doc:"This call returns a list of permissions given a role" @@ -5028,7 +5840,13 @@ module Role = struct let get_permissions_name_label = call ~flags:[`Session] ~name:"get_permissions_name_label" ~in_oss_since:None - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call returns a list of permission names given a role" + ) + ] ~params:[(Ref _role, "self", "a reference to a role")] ~result:(Set String, "a list of permission names") ~doc:"This call returns a list of permission names given a role" @@ -5036,7 +5854,13 @@ module Role = struct let get_by_permission = call ~flags:[`Session] ~name:"get_by_permission" ~in_oss_since:None - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call returns a list of roles given a permission" + ) + ] ~params:[(Ref _role, "permission", "a reference to a permission")] ~result:(Set (Ref _role), "a list of references to roles") ~doc:"This call returns a list of roles given a permission" @@ -5044,7 +5868,14 @@ module Role = struct let get_by_permission_name_label = call ~flags:[`Session] ~name:"get_by_permission_name_label" - ~in_oss_since:None ~in_product_since:rel_midnight_ride + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "This call returns a list of roles given a permission name" + ) + ] ~params:[(String, "label", "The short friendly name of the role")] ~result:(Set (Ref _role), "a list of references to roles") ~doc:"This call returns a list of roles given a permission name" @@ -5791,7 +6622,13 @@ module VMSS 
= struct (* VM schedule snapshot *) let snapshot_now = call ~flags:[`Session] ~name:"snapshot_now" ~in_oss_since:None - ~in_product_since:rel_falcon + ~lifecycle: + [ + ( Published + , rel_falcon + , "This call executes the snapshot schedule immediately" + ) + ] ~params:[(Ref _vmss, "vmss", "Snapshot Schedule to execute")] ~doc:"This call executes the snapshot schedule immediately" ~allowed_roles:_R_POOL_OP @@ -5826,7 +6663,8 @@ module VMSS = struct let set_retained_snapshots = call ~flags:[`Session] ~name:"set_retained_snapshots" ~in_oss_since:None - ~in_product_since:rel_falcon ~allowed_roles:_R_POOL_OP + ~lifecycle:[(Published, rel_falcon, "")] + ~allowed_roles:_R_POOL_OP ~params: [ (Ref _vmss, "self", "The schedule snapshot") @@ -5836,7 +6674,8 @@ module VMSS = struct let set_frequency = call ~flags:[`Session] ~name:"set_frequency" ~in_oss_since:None - ~in_product_since:rel_falcon + ~lifecycle: + [(Published, rel_falcon, "Set the value of the frequency field")] ~params: [ (Ref _vmss, "self", "The snapshot schedule") @@ -5846,7 +6685,8 @@ module VMSS = struct let set_schedule = call ~flags:[`Session] ~name:"set_schedule" ~in_oss_since:None - ~in_product_since:rel_falcon ~allowed_roles:_R_POOL_OP + ~lifecycle:[(Published, rel_falcon, "")] + ~allowed_roles:_R_POOL_OP ~params: [ (Ref _vmss, "self", "The snapshot schedule") @@ -5856,7 +6696,8 @@ module VMSS = struct let set_last_run_time = call ~flags:[`Session] ~name:"set_last_run_time" ~in_oss_since:None - ~in_product_since:rel_falcon ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~lifecycle:[(Published, rel_falcon, "")] + ~allowed_roles:_R_LOCAL_ROOT_ONLY ~params: [ (Ref _vmss, "self", "The snapshot schedule") @@ -5870,7 +6711,8 @@ module VMSS = struct let add_to_schedule = call ~flags:[`Session] ~name:"add_to_schedule" ~in_oss_since:None - ~in_product_since:rel_falcon ~allowed_roles:_R_POOL_OP + ~lifecycle:[(Published, rel_falcon, "")] + ~allowed_roles:_R_POOL_OP ~params: [ (Ref _vmss, "self", "The snapshot schedule") @@ -5881,7 +6723,8 @@ module VMSS = struct let remove_from_schedule = call ~flags:[`Session] ~name:"remove_from_schedule" ~in_oss_since:None - ~in_product_since:rel_falcon ~allowed_roles:_R_POOL_OP + ~lifecycle:[(Published, rel_falcon, "")] + ~allowed_roles:_R_POOL_OP ~params: [ (Ref _vmss, "self", "The snapshot schedule") @@ -5891,7 +6734,8 @@ module VMSS = struct let set_type = call ~flags:[`Session] ~name:"set_type" ~in_oss_since:None - ~in_product_since:rel_falcon ~allowed_roles:_R_POOL_OP + ~lifecycle:[(Published, rel_falcon, "")] + ~allowed_roles:_R_POOL_OP ~params: [ (Ref _vmss, "self", "The snapshot schedule") @@ -5961,7 +6805,8 @@ module VM_appliance = struct ) let start = - call ~name:"start" ~in_product_since:rel_boston + call ~name:"start" + ~lifecycle:[(Published, rel_boston, "Start all VMs in the appliance")] ~params: [ (Ref _vm_appliance, "self", "The VM appliance") @@ -5975,21 +6820,43 @@ module VM_appliance = struct ~doc:"Start all VMs in the appliance" ~allowed_roles:_R_POOL_OP () let clean_shutdown = - call ~name:"clean_shutdown" ~in_product_since:rel_boston + call ~name:"clean_shutdown" + ~lifecycle: + [ + ( Published + , rel_boston + , "Perform a clean shutdown of all the VMs in the appliance" + ) + ] ~params:[(Ref _vm_appliance, "self", "The VM appliance")] ~errs:[Api_errors.operation_partially_failed] ~doc:"Perform a clean shutdown of all the VMs in the appliance" ~allowed_roles:_R_POOL_OP () let hard_shutdown = - call ~name:"hard_shutdown" ~in_product_since:rel_boston + call ~name:"hard_shutdown" + ~lifecycle: 
+ [ + ( Published + , rel_boston + , "Perform a hard shutdown of all the VMs in the appliance" + ) + ] ~params:[(Ref _vm_appliance, "self", "The VM appliance")] ~errs:[Api_errors.operation_partially_failed] ~doc:"Perform a hard shutdown of all the VMs in the appliance" ~allowed_roles:_R_POOL_OP () let shutdown = - call ~name:"shutdown" ~in_product_since:rel_boston + call ~name:"shutdown" + ~lifecycle: + [ + ( Published + , rel_boston + , "For each VM in the appliance, try to shut it down cleanly. If \ + this fails, perform a hard shutdown of the VM." + ) + ] ~params:[(Ref _vm_appliance, "self", "The VM appliance")] ~errs:[Api_errors.operation_partially_failed] ~doc: @@ -5998,7 +6865,15 @@ module VM_appliance = struct ~allowed_roles:_R_POOL_OP () let assert_can_be_recovered = - call ~name:"assert_can_be_recovered" ~in_product_since:rel_boston + call ~name:"assert_can_be_recovered" + ~lifecycle: + [ + ( Published + , rel_boston + , "Assert whether all SRs required to recover this VM appliance are \ + available." + ) + ] ~params: [ (Ref _vm_appliance, "self", "The VM appliance to recover") @@ -6014,7 +6889,14 @@ module VM_appliance = struct ~allowed_roles:_R_READ_ONLY () let get_SRs_required_for_recovery = - call ~name:"get_SRs_required_for_recovery" ~in_product_since:rel_creedence + call ~name:"get_SRs_required_for_recovery" + ~lifecycle: + [ + ( Published + , rel_creedence + , "Get the list of SRs required by the VM appliance to recover." + ) + ] ~params: [ ( Ref _vm_appliance @@ -6033,7 +6915,8 @@ module VM_appliance = struct ~allowed_roles:_R_READ_ONLY () let recover = - call ~name:"recover" ~in_product_since:rel_boston + call ~name:"recover" + ~lifecycle:[(Published, rel_boston, "Recover the VM appliance")] ~params: [ (Ref _vm_appliance, "self", "The VM appliance to recover") @@ -6080,7 +6963,15 @@ end module DR_task = struct (* DR_task *) let create = - call ~name:"create" ~in_product_since:rel_boston + call ~name:"create" + ~lifecycle: + [ + ( Published + , rel_boston + , "Create a disaster recovery task which will query the supplied \ + list of devices" + ) + ] ~params: [ (String, "type", "The SR driver type of the SRs to introduce") @@ -6097,7 +6988,15 @@ module DR_task = struct ~allowed_roles:_R_POOL_OP () let destroy = - call ~name:"destroy" ~in_product_since:rel_boston + call ~name:"destroy" + ~lifecycle: + [ + ( Published + , rel_boston + , "Destroy the disaster recovery task, detaching and forgetting any \ + SRs introduced which are no longer required" + ) + ] ~params:[(Ref _dr_task, "self", "The disaster recovery task to destroy")] ~doc: "Destroy the disaster recovery task, detaching and forgetting any SRs \ @@ -6133,8 +7032,17 @@ module Event = struct ) let register = - call ~name:"register" ~in_product_since:rel_rio - ~internal_deprecated_since:rel_boston + call ~name:"register" + ~lifecycle: + [ + ( Published + , rel_rio + , "Registers this session with the event system for a set of given \ + classes. This method is only recommended for legacy use in \ + conjunction with event.next." + ) + ; (Deprecated, rel_boston, "") + ] ~params: [ ( Set String @@ -6151,8 +7059,17 @@ module Event = struct ~allowed_roles:_R_ALL () let unregister = - call ~name:"unregister" ~in_product_since:rel_rio - ~internal_deprecated_since:rel_boston + call ~name:"unregister" + ~lifecycle: + [ + ( Published + , rel_rio + , "Removes this session's registration with the event system for a \ + set of given classes. This method is only recommended for legacy \ + use in conjunction with event.next." 
+ ) + ; (Deprecated, rel_boston, "") + ] ~params: [ ( Set String @@ -6168,8 +7085,17 @@ module Event = struct ~allowed_roles:_R_ALL () let next = - call ~name:"next" ~params:[] ~in_product_since:rel_rio - ~internal_deprecated_since:rel_boston + call ~name:"next" ~params:[] + ~lifecycle: + [ + ( Published + , rel_rio + , "Blocking call which returns a (possibly empty) batch of events. \ + This method is only recommended for legacy use. New development \ + should use event.from which supersedes this method." + ) + ; (Deprecated, rel_boston, "") + ] ~doc: "Blocking call which returns a (possibly empty) batch of events. This \ method is only recommended for legacy use. New development should use \ @@ -6194,7 +7120,15 @@ module Event = struct ) ; (Float, "timeout", "Return after this many seconds if no events match") ] - ~in_product_since:rel_boston + ~lifecycle: + [ + ( Published + , rel_boston + , "Blocking call which returns a new token and a (possibly empty) \ + batch of events. The returned token can be used in subsequent \ + calls to this function." + ) + ] ~doc: "Blocking call which returns a new token and a (possibly empty) batch \ of events. The returned token can be used in subsequent calls to this \ @@ -6213,7 +7147,14 @@ module Event = struct ~allowed_roles:_R_ALL () let get_current_id = - call ~name:"get_current_id" ~params:[] ~in_product_since:rel_rio + call ~name:"get_current_id" ~params:[] + ~lifecycle: + [ + ( Published + , rel_rio + , "Return the ID of the next event to be generated by the system" + ) + ] ~doc:"Return the ID of the next event to be generated by the system" ~flags:[`Session] ~result:(Int, "the event ID") ~allowed_roles:_R_ALL () @@ -6224,7 +7165,20 @@ module Event = struct (String, "class", "class of the object") ; (String, "ref", "A reference to the object that will be changed.") ] - ~in_product_since:rel_tampa + ~lifecycle: + [ + ( Published + , rel_tampa + , "Injects an artificial event on the given object and returns the \ + corresponding ID in the form of a token, which can be used as a \ + point of reference for database events. For example, to check \ + whether an object has reached the right state before attempting \ + an operation, one can inject an artificial event on the object \ + and wait until the token returned by consecutive event.from calls \ + is lexicographically greater than the one returned by \ + event.inject." 
+ ) + ] ~doc: "Injects an artificial event on the given object and returns the \ corresponding ID in the form of a token, which can be used as a point \ @@ -6291,7 +7245,9 @@ end module Blob = struct let create = - call ~name:"create" ~in_product_since:rel_orlando + call ~name:"create" + ~lifecycle: + [(Published, rel_orlando, "Create a placeholder for a binary blob")] ~versioned_params: [ { @@ -6316,7 +7272,8 @@ module Blob = struct ~allowed_roles:_R_POOL_OP () let destroy = - call ~name:"destroy" ~in_product_since:rel_orlando + call ~name:"destroy" + ~lifecycle:[(Published, rel_orlando, "")] ~params:[(Ref _blob, "self", "The reference of the blob to destroy")] ~flags:[`Session] ~allowed_roles:_R_POOL_OP () @@ -6362,7 +7319,8 @@ module Message = struct ) let create = - call ~name:"create" ~in_product_since:rel_orlando + call ~name:"create" + ~lifecycle:[(Published, rel_orlando, "")] ~params: [ (String, "name", "The name of the message") @@ -6379,7 +7337,8 @@ module Message = struct ~allowed_roles:_R_POOL_OP () let destroy = - call ~name:"destroy" ~in_product_since:rel_orlando + call ~name:"destroy" + ~lifecycle:[(Published, rel_orlando, "")] ~params: [(Ref _message, "self", "The reference of the message to destroy")] ~flags:[`Session] ~allowed_roles:_R_POOL_OP () @@ -6390,13 +7349,15 @@ module Message = struct ~allowed_roles:_R_POOL_OP () let get_all = - call ~name:"get_all" ~in_product_since:rel_orlando ~params:[] - ~flags:[`Session] + call ~name:"get_all" + ~lifecycle:[(Published, rel_orlando, "")] + ~params:[] ~flags:[`Session] ~result:(Set (Ref _message), "The references to the messages") ~allowed_roles:_R_READ_ONLY () let get = - call ~name:"get" ~in_product_since:rel_orlando + call ~name:"get" + ~lifecycle:[(Published, rel_orlando, "")] ~params: [ (cls, "cls", "The class of object") @@ -6411,7 +7372,8 @@ module Message = struct ~allowed_roles:_R_READ_ONLY () let get_since = - call ~name:"get_since" ~in_product_since:rel_orlando + call ~name:"get_since" + ~lifecycle:[(Published, rel_orlando, "")] ~params: [ ( DateTime @@ -6424,27 +7386,31 @@ module Message = struct ~allowed_roles:_R_READ_ONLY () let get_by_uuid = - call ~name:"get_by_uuid" ~in_product_since:rel_orlando + call ~name:"get_by_uuid" + ~lifecycle:[(Published, rel_orlando, "")] ~params:[(String, "uuid", "The uuid of the message")] ~flags:[`Session] ~result:(Ref _message, "The message reference") ~allowed_roles:_R_READ_ONLY () let get_record = - call ~name:"get_record" ~in_product_since:rel_orlando + call ~name:"get_record" + ~lifecycle:[(Published, rel_orlando, "")] ~params:[(Ref _message, "self", "The reference to the message")] ~flags:[`Session] ~result:(Record _message, "The message record") ~allowed_roles:_R_READ_ONLY () let get_all_records = - call ~name:"get_all_records" ~in_product_since:rel_orlando ~params:[] - ~flags:[`Session] + call ~name:"get_all_records" + ~lifecycle:[(Published, rel_orlando, "")] + ~params:[] ~flags:[`Session] ~result:(Map (Ref _message, Record _message), "The messages") ~allowed_roles:_R_READ_ONLY () let get_all_records_where = - call ~name:"get_all_records_where" ~in_product_since:rel_orlando + call ~name:"get_all_records_where" + ~lifecycle:[(Published, rel_orlando, "")] ~params:[(String, "expr", "The expression to match (not currently used)")] ~flags:[`Session] ~result:(Map (Ref _message, Record _message), "The messages") @@ -6495,7 +7461,8 @@ end module Secret = struct let introduce = - call ~name:"introduce" ~in_product_since:rel_midnight_ride + call ~name:"introduce" + 
~lifecycle:[(Published, rel_midnight_ride, "")] ~versioned_params: [ { diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index 8b34e41c8ec..07ea70f90a4 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -573,36 +573,20 @@ let get_deprecated lifecycle = Some deprecated with Not_found -> None -let call ~name ?(doc = "") ?(in_oss_since = Some "3.0.3") ?in_product_since - ?internal_deprecated_since ?result ?(flags = [`Session; `Async]) - ?(effect = true) ?(tag = Custom) ?(errs = []) ?(custom_marshaller = false) - ?(db_only = false) ?(no_current_operations = false) ?(secret = false) - ?(hide_from_docs = false) ?(pool_internal = false) ~allowed_roles - ?(map_keys_roles = []) ?(params = []) ?versioned_params ?lifecycle - ?(doc_tags = []) ?forward_to () = +let call ~name ?(doc = "") ?(in_oss_since = Some "3.0.3") ?result + ?(flags = [`Session; `Async]) ?(effect = true) ?(tag = Custom) ?(errs = []) + ?(custom_marshaller = false) ?(db_only = false) + ?(no_current_operations = false) ?(secret = false) ?(hide_from_docs = false) + ?(pool_internal = false) ~allowed_roles ?(map_keys_roles = []) + ?(params = []) ?versioned_params ?lifecycle ?(doc_tags = []) ?forward_to () + = (* if you specify versioned_params then these get put in the params field of the message record; * otherwise params go in with no default values and param_release=call_release... *) - if lifecycle = None && in_product_since = None then - failwith ("Lifecycle for message '" ^ name ^ "' not specified") ; let lifecycle = match lifecycle with | None -> - let published = - match in_product_since with - | None -> - [] - | Some rel -> - [(Published, rel, doc)] - in - let deprecated = - match internal_deprecated_since with - | None -> - [] - | Some rel -> - [(Deprecated, rel, "")] - in - published @ deprecated + failwith ("Lifecycle for message '" ^ name ^ "' not specified") | Some l -> l in diff --git a/ocaml/idl/datamodel_diagnostics.ml b/ocaml/idl/datamodel_diagnostics.ml index 88c40eb47cc..b81d12ca905 100644 --- a/ocaml/idl/datamodel_diagnostics.ml +++ b/ocaml/idl/datamodel_diagnostics.ml @@ -1,14 +1,23 @@ open Datamodel_common let gc_compact = - call ~name:"gc_compact" ~in_product_since:Datamodel_types.rel_stockholm + call ~name:"gc_compact" + ~lifecycle: + [ + ( Published + , Datamodel_types.rel_stockholm + , "Perform a full major collection and compact the heap on a host" + ) + ] ~doc:"Perform a full major collection and compact the heap on a host" ~hide_from_docs:true ~params:[(Ref _host, "host", "The host to perform GC")] ~errs:[] ~allowed_roles:Datamodel_roles._R_POOL_OP () let gc_stats = - call ~name:"gc_stats" ~in_product_since:Datamodel_types.rel_stockholm + call ~name:"gc_stats" + ~lifecycle: + [(Published, Datamodel_types.rel_stockholm, "Get GC stats of a host")] ~doc:"Get GC stats of a host" ~hide_from_docs:true ~params:[(Ref _host, "host", "The host from which to obtain GC stats")] ~errs:[] ~allowed_roles:Datamodel_roles._R_POOL_OP @@ -16,14 +25,25 @@ let gc_stats = () let db_stats = - call ~name:"db_stats" ~in_product_since:Datamodel_types.rel_stockholm + call ~name:"db_stats" + ~lifecycle: + [ + ( Published + , Datamodel_types.rel_stockholm + , "Get the database stats of the pool" + ) + ] ~doc:"Get the database stats of the pool" ~hide_from_docs:true ~params:[] ~errs:[] ~allowed_roles:Datamodel_roles._R_POOL_OP ~result:(Map (String, String), "Collection of database stats") () let network_stats = - call ~name:"network_stats" 
~in_product_since:Datamodel_types.rel_stockholm + call ~name:"network_stats" + ~lifecycle: + [ + (Published, Datamodel_types.rel_stockholm, "Get network stats of a host") + ] ~doc:"Get network stats of a host" ~hide_from_docs:true ~params: [ diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index 5bb9887a046..76508745467 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -26,7 +26,16 @@ let api_version = ] let migrate_receive = - call ~in_oss_since:None ~in_product_since:rel_tampa ~name:"migrate_receive" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_tampa + , "Prepare to receive a VM, returning a token which can be passed to \ + VM.migrate." + ) + ] + ~name:"migrate_receive" ~doc: "Prepare to receive a VM, returning a token which can be passed to \ VM.migrate." @@ -44,7 +53,17 @@ let migrate_receive = ~allowed_roles:_R_VM_POWER_ADMIN () let ha_disable_failover_decisions = - call ~in_product_since:rel_orlando ~name:"ha_disable_failover_decisions" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Prevents future failover decisions happening on this node. This \ + function should only be used as part of a controlled shutdown of \ + the HA system." + ) + ] + ~name:"ha_disable_failover_decisions" ~doc: "Prevents future failover decisions happening on this node. This \ function should only be used as part of a controlled shutdown of the HA \ @@ -54,7 +73,17 @@ let ha_disable_failover_decisions = () let ha_disarm_fencing = - call ~in_product_since:rel_orlando ~name:"ha_disarm_fencing" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Disarms the fencing function of the HA subsystem. This function is \ + extremely dangerous and should only be used as part of a controlled \ + shutdown of the HA system." + ) + ] + ~name:"ha_disarm_fencing" ~doc: "Disarms the fencing function of the HA subsystem. This function is \ extremely dangerous and should only be used as part of a controlled \ @@ -64,7 +93,17 @@ let ha_disarm_fencing = () let ha_stop_daemon = - call ~in_product_since:rel_orlando ~name:"ha_stop_daemon" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Stops the HA daemon. This function is extremely dangerous and \ + should only be used as part of a controlled shutdown of the HA \ + system." + ) + ] + ~name:"ha_stop_daemon" ~doc: "Stops the HA daemon. This function is extremely dangerous and should \ only be used as part of a controlled shutdown of the HA system." @@ -73,7 +112,16 @@ let ha_stop_daemon = () let ha_release_resources = - call ~in_product_since:rel_orlando ~name:"ha_release_resources" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Cleans up any resources on the host associated with this HA \ + instance." + ) + ] + ~name:"ha_release_resources" ~doc:"Cleans up any resources on the host associated with this HA instance." 
~params: [(Ref _host, "host", "The Host whose resources should be cleaned up")] @@ -81,7 +129,15 @@ let ha_release_resources = () let local_assert_healthy = - call ~flags:[`Session] ~in_product_since:rel_miami + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "Returns nothing if this host is healthy, otherwise it throws an \ + error explaining why the host is unhealthy" + ) + ] ~name:"local_assert_healthy" ~doc: "Returns nothing if this host is healthy, otherwise it throws an error \ @@ -102,7 +158,16 @@ let local_assert_healthy = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let preconfigure_ha = - call ~in_product_since:rel_miami ~name:"preconfigure_ha" + call + ~lifecycle: + [ + ( Published + , rel_miami + , "Attach statefiles, generate config files but do not start the xHA \ + daemon." + ) + ] + ~name:"preconfigure_ha" ~doc: "Attach statefiles, generate config files but do not start the xHA \ daemon." @@ -117,14 +182,26 @@ let preconfigure_ha = () let ha_join_liveset = - call ~in_product_since:rel_orlando ~name:"ha_join_liveset" - ~doc:"Block until this host joins the liveset." + call + ~lifecycle: + [(Published, rel_orlando, "Block until this host joins the liveset.")] + ~name:"ha_join_liveset" ~doc:"Block until this host joins the liveset." ~params:[(Ref _host, "host", "The Host whose HA daemon to start")] ~pool_internal:true ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () let ha_wait_for_shutdown_via_statefile = - call ~in_product_since:rel_orlando ~name:"ha_wait_for_shutdown_via_statefile" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Block until this host xHA daemon exits after having seen the \ + invalid statefile. If the host loses statefile access then throw an \ + exception" + ) + ] + ~name:"ha_wait_for_shutdown_via_statefile" ~doc: "Block until this host xHA daemon exits after having seen the invalid \ statefile. If the host loses statefile access then throw an exception" @@ -144,7 +221,9 @@ let host_query_ha = call ~flags:[`Session] () *) let request_backup = - call ~flags:[`Session] ~name:"request_backup" ~in_product_since:rel_rio + call ~flags:[`Session] ~name:"request_backup" + ~lifecycle: + [(Published, rel_rio, "Request this host performs a database backup")] ~doc:"Request this host performs a database backup" ~params: [ @@ -161,7 +240,9 @@ let request_backup = let request_config_file_sync = call ~flags:[`Session] ~name:"request_config_file_sync" - ~in_product_since:rel_rio ~doc:"Request this host syncs dom0 config files" + ~lifecycle: + [(Published, rel_rio, "Request this host syncs dom0 config files")] + ~doc:"Request this host syncs dom0 config files" ~params: [ (Ref _host, "host", "The Host to send the request to") @@ -173,7 +254,18 @@ let request_config_file_sync = (* Since there are no async versions, no tasks are generated (!) this is important otherwise the call would block doing a Db.Task.create *) let propose_new_master = - call ~flags:[`Session] ~in_product_since:rel_miami ~name:"propose_new_master" + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "First phase of a two-phase commit protocol to set the new master. \ + If the host has already committed to another configuration or if \ + the proposed new master is not in this node's membership set then \ + the call will return an exception." + ) + ] + ~name:"propose_new_master" ~doc: "First phase of a two-phase commit protocol to set the new master. 
If \ the host has already committed to another configuration or if the \ @@ -195,8 +287,10 @@ let propose_new_master = () let abort_new_master = - call ~flags:[`Session] ~in_product_since:rel_miami ~name:"abort_new_master" - ~doc:"Causes the new master transaction to abort" + call ~flags:[`Session] + ~lifecycle: + [(Published, rel_miami, "Causes the new master transaction to abort")] + ~name:"abort_new_master" ~doc:"Causes the new master transaction to abort" ~params: [ ( String @@ -208,7 +302,15 @@ let abort_new_master = () let commit_new_master = - call ~flags:[`Session] ~in_product_since:rel_miami ~name:"commit_new_master" + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "Second phase of a two-phase commit protocol to set the new master." + ) + ] + ~name:"commit_new_master" ~doc:"Second phase of a two-phase commit protocol to set the new master." ~params: [ @@ -221,7 +323,15 @@ let commit_new_master = () let compute_free_memory = - call ~in_product_since:rel_orlando ~name:"compute_free_memory" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Computes the amount of free memory on the host." + ) + ] + ~name:"compute_free_memory" ~doc:"Computes the amount of free memory on the host." ~params:[(Ref _host, "host", "The host to send the request to")] ~pool_internal:false ~hide_from_docs:false @@ -229,7 +339,15 @@ let compute_free_memory = ~allowed_roles:_R_READ_ONLY ~doc_tags:[Memory] () let compute_memory_overhead = - call ~in_product_since:rel_midnight_ride ~name:"compute_memory_overhead" + call + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Computes the virtualization memory overhead of a host." + ) + ] + ~name:"compute_memory_overhead" ~doc:"Computes the virtualization memory overhead of a host." ~params: [(Ref _host, "host", "The host for which to compute the memory overhead")] @@ -239,7 +357,14 @@ let compute_memory_overhead = (* Diagnostics see if host is in emergency mode *) let is_in_emergency_mode = - call ~flags:[`Session] ~in_product_since:rel_miami + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "Diagnostics call to discover if host is in emergency mode" + ) + ] ~name:"is_in_emergency_mode" ~doc:"Diagnostics call to discover if host is in emergency mode" ~params:[] ~pool_internal:false ~hide_from_docs:true @@ -248,7 +373,15 @@ let is_in_emergency_mode = (* Signal that the management IP address or hostname has been changed beneath us. *) let signal_networking_change = - call ~flags:[`Session] ~in_product_since:rel_miami + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "Signals that the management IP address or hostname has been changed \ + beneath us." 
+ ) + ] ~name:"signal_networking_change" ~doc: "Signals that the management IP address or hostname has been changed \ @@ -257,7 +390,9 @@ let signal_networking_change = ~doc_tags:[Networking] () let notify = - call ~in_product_since:rel_miami ~name:"notify" ~doc:"Notify an event" + call + ~lifecycle:[(Published, rel_miami, "Notify an event")] + ~name:"notify" ~doc:"Notify an event" ~params: [ (String, "ty", "type of the notification") @@ -267,8 +402,9 @@ let notify = () let syslog_reconfigure = - call ~in_product_since:rel_miami ~name:"syslog_reconfigure" - ~doc:"Re-configure syslog logging" + call + ~lifecycle:[(Published, rel_miami, "Re-configure syslog logging")] + ~name:"syslog_reconfigure" ~doc:"Re-configure syslog logging" ~params: [ ( Ref _host @@ -280,7 +416,10 @@ let syslog_reconfigure = ~allowed_roles:_R_POOL_OP () let management_reconfigure = - call ~in_product_since:rel_miami ~name:"management_reconfigure" + call + ~lifecycle: + [(Published, rel_miami, "Reconfigure the management network interface")] + ~name:"management_reconfigure" ~doc:"Reconfigure the management network interface" ~params: [ @@ -292,7 +431,16 @@ let management_reconfigure = ~allowed_roles:_R_POOL_OP ~doc_tags:[Networking] () let local_management_reconfigure = - call ~flags:[`Session] ~in_product_since:rel_miami + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "Reconfigure the management network interface. Should only be used \ + if Host.management_reconfigure is impossible because the network \ + configuration is broken." + ) + ] ~name:"local_management_reconfigure" ~doc: "Reconfigure the management network interface. Should only be used if \ @@ -308,16 +456,25 @@ let local_management_reconfigure = ~allowed_roles:_R_POOL_OP () let ha_xapi_healthcheck = - call ~flags:[`Session] ~in_product_since:rel_orlando + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_orlando + , "Returns true if xapi appears to be functioning normally." + ) + ] ~name:"ha_xapi_healthcheck" ~doc:"Returns true if xapi appears to be functioning normally." ~result:(Bool, "true if xapi is functioning normally.") ~hide_from_docs:true ~allowed_roles:_R_POOL_ADMIN () let management_disable = - call ~flags:[`Session] ~in_product_since:rel_miami ~name:"management_disable" - ~doc:"Disable the management network interface" ~params:[] - ~allowed_roles:_R_POOL_OP ~doc_tags:[Networking] () + call ~flags:[`Session] + ~lifecycle: + [(Published, rel_miami, "Disable the management network interface")] + ~name:"management_disable" ~doc:"Disable the management network interface" + ~params:[] ~allowed_roles:_R_POOL_OP ~doc_tags:[Networking] () let get_management_interface = call @@ -333,15 +490,25 @@ let get_management_interface = Not intended for HA *) let assert_can_evacuate = - call ~in_product_since:rel_miami ~name:"assert_can_evacuate" - ~doc:"Check this host can be evacuated." + call + ~lifecycle:[(Published, rel_miami, "Check this host can be evacuated.")] + ~name:"assert_can_evacuate" ~doc:"Check this host can be evacuated." 
~params:[(Ref _host, "host", "The host to evacuate")] ~allowed_roles:_R_POOL_OP () (* New Orlando message which aims to make the GUI less brittle (unexpected errors will trigger a VM suspend) and sensitive to HA planning constraints *) let get_vms_which_prevent_evacuation = - call ~in_product_since:rel_orlando ~name:"get_vms_which_prevent_evacuation" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Return a set of VMs which prevent the host being evacuated, with \ + per-VM error codes" + ) + ] + ~name:"get_vms_which_prevent_evacuation" ~doc: "Return a set of VMs which prevent the host being evacuated, with per-VM \ error codes" @@ -353,8 +520,7 @@ let get_vms_which_prevent_evacuation = ~allowed_roles:_R_READ_ONLY () let evacuate = - call ~in_product_since:rel_miami ~name:"evacuate" - ~doc:"Migrate all VMs off of this host, where possible." + call ~name:"evacuate" ~doc:"Migrate all VMs off of this host, where possible." ~lifecycle: [ (Published, rel_miami, "") @@ -391,7 +557,16 @@ let evacuate = () let get_uncooperative_resident_VMs = - call ~in_product_since:rel_midnight_ride ~internal_deprecated_since:rel_tampa + call + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Return a set of VMs which are not co-operating with the host's \ + memory control system" + ) + ; (Deprecated, rel_tampa, "") + ] ~name:"get_uncooperative_resident_VMs" ~doc: "Return a set of VMs which are not co-operating with the host's memory \ @@ -401,7 +576,16 @@ let get_uncooperative_resident_VMs = ~allowed_roles:_R_READ_ONLY () let get_uncooperative_domains = - call ~in_product_since:rel_midnight_ride ~internal_deprecated_since:rel_tampa + call + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Return the set of domain uuids which are not co-operating with the \ + host's memory control system" + ) + ; (Deprecated, rel_tampa, "") + ] ~name:"get_uncooperative_domains" ~doc: "Return the set of domain uuids which are not co-operating with the \ @@ -413,7 +597,15 @@ let get_uncooperative_domains = let retrieve_wlb_evacuate_recommendations = call ~name:"retrieve_wlb_evacuate_recommendations" - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "Retrieves recommended host migrations to perform when evacuating \ + the host from the wlb server. If a VM cannot be migrated from the \ + host the reason is listed instead of a recommendation." + ) + ] ~doc: "Retrieves recommended host migrations to perform when evacuating the \ host from the wlb server. If a VM cannot be migrated from the host the \ @@ -429,7 +621,16 @@ let retrieve_wlb_evacuate_recommendations = (* Host.Disable *) let disable = - call ~in_product_since:rel_rio ~name:"disable" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Puts the host into a state in which no new VMs can be started. \ + Currently active VMs on the host continue to execute." + ) + ] + ~name:"disable" ~doc: "Puts the host into a state in which no new VMs can be started. \ Currently active VMs on the host continue to execute." @@ -440,7 +641,14 @@ let disable = (* Host.Enable *) let enable = - call ~name:"enable" ~in_product_since:rel_rio + call ~name:"enable" + ~lifecycle: + [ + ( Published + , rel_rio + , "Puts the host into a state in which new VMs can be started." + ) + ] ~doc:"Puts the host into a state in which new VMs can be started." 
~params:[(Ref _host, "host", "The Host to enable")] ~allowed_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) @@ -449,7 +657,15 @@ let enable = (* Host.Shutdown *) let shutdown = - call ~name:"shutdown" ~in_product_since:rel_rio + call ~name:"shutdown" + ~lifecycle: + [ + ( Published + , rel_rio + , "Shutdown the host. (This function can only be called if there are \ + no currently running VMs on the host and it is disabled.)" + ) + ] ~doc: "Shutdown the host. (This function can only be called if there are no \ currently running VMs on the host and it is disabled.)" @@ -459,7 +675,15 @@ let shutdown = (* Host.reboot *) let reboot = - call ~name:"reboot" ~in_product_since:rel_rio + call ~name:"reboot" + ~lifecycle: + [ + ( Published + , rel_rio + , "Reboot the host. (This function can only be called if there are no \ + currently running VMs on the host and it is disabled.)" + ) + ] ~doc: "Reboot the host. (This function can only be called if there are no \ currently running VMs on the host and it is disabled.)" @@ -469,7 +693,14 @@ let reboot = (* Host.prepare_for_poweroff *) let prepare_for_poweroff = - call ~name:"prepare_for_poweroff" ~in_product_since:rel_kolkata + call ~name:"prepare_for_poweroff" + ~lifecycle: + [ + ( Published + , rel_kolkata + , "Performs the necessary actions before host shutdown or reboot." + ) + ] ~doc:"Performs the necessary actions before host shutdown or reboot." ~params: [(Ref _host, "host", "The Host that is about to reboot or shutdown")] @@ -478,13 +709,31 @@ let prepare_for_poweroff = (* Host.power_on *) let power_on = - call ~name:"power_on" ~in_product_since:rel_orlando + call ~name:"power_on" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Attempt to power-on the host (if the capability exists)." + ) + ] ~doc:"Attempt to power-on the host (if the capability exists)." ~params:[(Ref _host, "host", "The Host to power on")] ~allowed_roles:_R_POOL_OP () let restart_agent = - call ~name:"restart_agent" ~in_product_since:rel_rio + call ~name:"restart_agent" + ~lifecycle: + [ + ( Published + , rel_rio + , "Restarts the agent after a 10 second pause. WARNING: this is a \ + dangerous operation. Any operations in progress will be aborted, \ + and unrecoverable data loss may occur. The caller is responsible \ + for ensuring that there are no operations in progress when this \ + method is called." + ) + ] ~doc: "Restarts the agent after a 10 second pause. WARNING: this is a \ dangerous operation. Any operations in progress will be aborted, and \ @@ -496,7 +745,18 @@ let restart_agent = ~allowed_roles:_R_POOL_OP () let shutdown_agent = - call ~name:"shutdown_agent" ~in_product_since:rel_orlando + call ~name:"shutdown_agent" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Shuts the agent down after a 10 second pause. WARNING: this is a \ + dangerous operation. Any operations in progress will be aborted, \ + and unrecoverable data loss may occur. The caller is responsible \ + for ensuring that there are no operations in progress when this \ + method is called." + ) + ] ~doc: "Shuts the agent down after a 10 second pause. WARNING: this is a \ dangerous operation. Any operations in progress will be aborted, and \ @@ -507,31 +767,45 @@ let shutdown_agent = ~allowed_roles:_R_POOL_OP () let dmesg = - call ~name:"dmesg" ~in_product_since:rel_rio ~doc:"Get the host xen dmesg." + call ~name:"dmesg" + ~lifecycle:[(Published, rel_rio, "Get the host xen dmesg.")] + ~doc:"Get the host xen dmesg." 
~params:[(Ref _host, "host", "The Host to query")] ~result:(String, "dmesg string") ~allowed_roles:_R_POOL_OP () let dmesg_clear = - call ~name:"dmesg_clear" ~in_product_since:rel_rio + call ~name:"dmesg_clear" + ~lifecycle: + [(Published, rel_rio, "Get the host xen dmesg, and clear the buffer.")] ~doc:"Get the host xen dmesg, and clear the buffer." ~params:[(Ref _host, "host", "The Host to query")] ~result:(String, "dmesg string") ~allowed_roles:_R_POOL_OP () let get_log = - call ~name:"get_log" ~in_product_since:rel_rio ~doc:"Get the host's log file" + call ~name:"get_log" + ~lifecycle:[(Published, rel_rio, "Get the host's log file")] + ~doc:"Get the host's log file" ~params:[(Ref _host, "host", "The Host to query")] ~result:(String, "The contents of the host's primary log file") ~allowed_roles:_R_READ_ONLY () let send_debug_keys = - call ~name:"send_debug_keys" ~in_product_since:rel_rio + call ~name:"send_debug_keys" + ~lifecycle: + [ + ( Published + , rel_rio + , "Inject the given string as debugging keys into Xen" + ) + ] ~doc:"Inject the given string as debugging keys into Xen" ~params: [(Ref _host, "host", "The host"); (String, "keys", "The keys to send")] ~allowed_roles:_R_POOL_ADMIN () let get_data_sources = - call ~name:"get_data_sources" ~in_oss_since:None ~in_product_since:rel_orlando + call ~name:"get_data_sources" ~in_oss_since:None + ~lifecycle:[(Published, rel_orlando, "")] ~doc:"" ~result:(Set (Record _data_source), "A set of data sources") ~params:[(Ref _host, "host", "The host to interrogate")] @@ -539,7 +813,8 @@ let get_data_sources = let record_data_source = call ~name:"record_data_source" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [(Published, rel_orlando, "Start recording the specified data source")] ~doc:"Start recording the specified data source" ~params: [ @@ -550,7 +825,13 @@ let record_data_source = let query_data_source = call ~name:"query_data_source" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Query the latest value of the specified data source" + ) + ] ~doc:"Query the latest value of the specified data source" ~params: [ @@ -561,7 +842,9 @@ let query_data_source = ~errs:[] ~flags:[`Session] ~allowed_roles:_R_READ_ONLY () let attach_static_vdis = - call ~name:"attach_static_vdis" ~in_product_since:rel_midnight_ride + call ~name:"attach_static_vdis" + ~lifecycle: + [(Published, rel_midnight_ride, "Statically attach VDIs on a host.")] ~doc:"Statically attach VDIs on a host." ~params: [ @@ -575,7 +858,9 @@ let attach_static_vdis = () let detach_static_vdis = - call ~name:"detach_static_vdis" ~in_product_since:rel_midnight_ride + call ~name:"detach_static_vdis" + ~lifecycle: + [(Published, rel_midnight_ride, "Detach static VDIs from a host.")] ~doc:"Detach static VDIs from a host." ~params: [ @@ -586,7 +871,16 @@ let detach_static_vdis = () let declare_dead = - call ~name:"declare_dead" ~in_product_since:rel_clearwater + call ~name:"declare_dead" + ~lifecycle: + [ + ( Published + , rel_clearwater + , "Declare that a host is dead. This is a dangerous operation, and \ + should only be called if the administrator is absolutely sure the \ + host is definitely dead" + ) + ] ~doc: "Declare that a host is dead. 
This is a dangerous operation, and should \ only be called if the administrator is absolutely sure the host is \ @@ -596,7 +890,13 @@ let declare_dead = let forget_data_source_archives = call ~name:"forget_data_source_archives" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Forget the recorded statistics related to the specified data source" + ) + ] ~doc:"Forget the recorded statistics related to the specified data source" ~params: [ @@ -609,7 +909,14 @@ let forget_data_source_archives = ~flags:[`Session] ~allowed_roles:_R_POOL_OP () let get_diagnostic_timing_stats = - call ~flags:[`Session] ~in_product_since:rel_miami + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_miami + , "Return timing statistics for diagnostic purposes" + ) + ] ~name:"get_diagnostic_timing_stats" ~doc:"Return timing statistics for diagnostic purposes" ~params:[(Ref _host, "host", "The host to interrogate")] @@ -617,7 +924,15 @@ let get_diagnostic_timing_stats = ~hide_from_docs:true ~allowed_roles:_R_READ_ONLY () let create_new_blob = - call ~name:"create_new_blob" ~in_product_since:rel_orlando + call ~name:"create_new_blob" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Create a placeholder for a named binary blob of data that is \ + associated with this host" + ) + ] ~doc: "Create a placeholder for a named binary blob of data that is associated \ with this host" @@ -659,7 +974,8 @@ let create_new_blob = ~allowed_roles:_R_POOL_OP () let call_plugin = - call ~name:"call_plugin" ~in_product_since:rel_orlando + call ~name:"call_plugin" + ~lifecycle:[(Published, rel_orlando, "Call an API plugin on this host")] ~doc:"Call an API plugin on this host" ~params: [ @@ -672,7 +988,14 @@ let call_plugin = ~allowed_roles:_R_POOL_ADMIN () let has_extension = - call ~name:"has_extension" ~in_product_since:rel_ely + call ~name:"has_extension" + ~lifecycle: + [ + ( Published + , rel_ely + , "Return true if the extension is available on the host" + ) + ] ~doc:"Return true if the extension is available on the host" ~params: [ @@ -683,8 +1006,9 @@ let has_extension = ~allowed_roles:_R_POOL_ADMIN () let call_extension = - call ~name:"call_extension" ~in_product_since:rel_ely ~custom_marshaller:true - ~doc:"Call an API extension on this host" + call ~name:"call_extension" + ~lifecycle:[(Published, rel_ely, "Call an API extension on this host")] + ~custom_marshaller:true ~doc:"Call an API extension on this host" ~params: [ (Ref _host, "host", "The host") @@ -695,7 +1019,15 @@ let call_extension = () let enable_binary_storage = - call ~name:"enable_binary_storage" ~in_product_since:rel_orlando + call ~name:"enable_binary_storage" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Enable binary storage on a particular host, for storing RRDs, \ + messages and blobs" + ) + ] ~hide_from_docs:true ~pool_internal:true ~doc: "Enable binary storage on a particular host, for storing RRDs, messages \ @@ -704,7 +1036,15 @@ let enable_binary_storage = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let disable_binary_storage = - call ~name:"disable_binary_storage" ~in_product_since:rel_orlando + call ~name:"disable_binary_storage" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Disable binary storage on a particular host, deleting stored RRDs, \ + messages and blobs" + ) + ] ~hide_from_docs:true ~pool_internal:true ~doc: "Disable binary storage on a particular host, deleting stored RRDs, \ @@ -713,7 +1053,8 @@ let disable_binary_storage = ~allowed_roles:_R_LOCAL_ROOT_ONLY () 
let update_pool_secret = - call ~name:"update_pool_secret" ~in_product_since:rel_midnight_ride + call ~name:"update_pool_secret" + ~lifecycle:[(Published, rel_midnight_ride, "")] ~hide_from_docs:true ~pool_internal:true ~doc:"" ~params: [ @@ -723,7 +1064,8 @@ let update_pool_secret = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let update_master = - call ~name:"update_master" ~in_product_since:rel_midnight_ride + call ~name:"update_master" + ~lifecycle:[(Published, rel_midnight_ride, "")] ~hide_from_docs:true ~pool_internal:true ~doc:"" ~params: [ @@ -733,7 +1075,9 @@ let update_master = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let set_localdb_key = - call ~name:"set_localdb_key" ~in_product_since:rel_midnight_ride + call ~name:"set_localdb_key" + ~lifecycle: + [(Published, rel_midnight_ride, "Set a key in the local DB of the host.")] ~doc:"Set a key in the local DB of the host." ~params: [ @@ -759,7 +1103,14 @@ let refresh_pack_info = let bugreport_upload = call ~name:"bugreport_upload" ~doc:"Run xen-bugtool --yestoall and upload the output to support" - ~in_oss_since:None ~in_product_since:rel_rio + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Run xen-bugtool --yestoall and upload the output to support" + ) + ] ~params: [ (Ref _host, "host", "The host on which to run xen-bugtool") @@ -769,8 +1120,9 @@ let bugreport_upload = ~allowed_roles:_R_POOL_OP () let list_methods = - call ~name:"list_methods" ~in_product_since:rel_rio ~flags:[`Session] - ~doc:"List all supported methods" ~params:[] + call ~name:"list_methods" + ~lifecycle:[(Published, rel_rio, "List all supported methods")] + ~flags:[`Session] ~doc:"List all supported methods" ~params:[] ~result:(Set String, "The name of every supported method.") ~allowed_roles:_R_READ_ONLY () @@ -946,20 +1298,24 @@ let create_params = ] let create = - call ~name:"create" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"create" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Create a new host record")] ~versioned_params:create_params ~doc:"Create a new host record" ~result:(Ref _host, "Reference to the newly created host object.") ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () let destroy = - call ~name:"destroy" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"destroy" ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Destroy specified host record in database")] ~doc:"Destroy specified host record in database" ~params:[(Ref _host, "self", "The host record to remove")] ~allowed_roles:_R_POOL_OP () let get_system_status_capabilities = call ~flags:[`Session] ~name:"get_system_status_capabilities" - ~in_oss_since:None ~in_product_since:rel_miami + ~in_oss_since:None + ~lifecycle:[(Published, rel_miami, "")] ~params:[(Ref _host, "host", "The host to interrogate")] ~doc:"" ~result: @@ -968,7 +1324,14 @@ let get_system_status_capabilities = let set_hostname_live = call ~flags:[`Session] ~name:"set_hostname_live" ~in_oss_since:None - ~in_product_since:rel_miami + ~lifecycle: + [ + ( Published + , rel_miami + , "Sets the host name to the specified string. Both the API and \ + lower-level system hostname are changed immediately." 
+ ) + ] ~params: [ (Ref _host, "host", "The host whose host name to set") @@ -982,7 +1345,14 @@ let set_hostname_live = let tickle_heartbeat = call ~flags:[`Session] ~name:"tickle_heartbeat" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Needs to be called every 30 seconds for the master to believe the \ + host is alive" + ) + ] ~params: [ ( Ref _host @@ -1003,7 +1373,15 @@ let tickle_heartbeat = let sync_data = call ~flags:[`Session] ~name:"sync_data" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "This causes the synchronisation of the non-database data (messages, \ + RRDs and so on) stored on the master to be synchronised with the \ + host" + ) + ] ~params:[(Ref _host, "host", "The host to whom the data should be sent")] ~doc: "This causes the synchronisation of the non-database data (messages, \ @@ -1012,7 +1390,13 @@ let sync_data = let backup_rrds = call ~flags:[`Session] ~name:"backup_rrds" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "This causes the RRDs to be backed up to the master" + ) + ] ~params: [ (Ref _host, "host", "Schedule a backup of the RRDs of this host") @@ -1027,7 +1411,13 @@ let backup_rrds = let get_servertime = call ~flags:[`Session] ~name:"get_servertime" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "This call queries the host's clock for the current time" + ) + ] ~params:[(Ref _host, "host", "The host whose clock should be queried")] ~doc:"This call queries the host's clock for the current time" ~result:(DateTime, "The current time") @@ -1035,7 +1425,14 @@ let get_servertime = let get_server_localtime = call ~flags:[`Session] ~name:"get_server_localtime" ~in_oss_since:None - ~in_product_since:rel_cowley + ~lifecycle: + [ + ( Published + , rel_cowley + , "This call queries the host's clock for the current time in the \ + host's local timezone" + ) + ] ~params:[(Ref _host, "host", "The host whose clock should be queried")] ~doc: "This call queries the host's clock for the current time in the host's \ @@ -1045,7 +1442,14 @@ let get_server_localtime = let emergency_ha_disable = call ~flags:[`Session] ~name:"emergency_ha_disable" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "This call disables HA on the local host. This should only be used \ + with extreme care." + ) + ] ~versioned_params: [ { @@ -1101,8 +1505,15 @@ let certificate_list = () let crl_install = - call ~in_oss_since:None ~in_product_since:rel_george ~pool_internal:true - ~hide_from_docs:true ~name:"crl_install" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "Install a TLS CA-issued Certificate Revocation List to this host." + ) + ] + ~pool_internal:true ~hide_from_docs:true ~name:"crl_install" ~doc:"Install a TLS CA-issued Certificate Revocation List to this host." ~params: [ @@ -1113,15 +1524,31 @@ let crl_install = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let crl_uninstall = - call ~in_oss_since:None ~in_product_since:rel_george ~pool_internal:true - ~hide_from_docs:true ~name:"crl_uninstall" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "Uninstall a TLS CA-issued certificate revocation list from this \ + host." + ) + ] + ~pool_internal:true ~hide_from_docs:true ~name:"crl_uninstall" ~doc:"Uninstall a TLS CA-issued certificate revocation list from this host." 
~params:[(Ref _host, "host", "The host"); (String, "name", "The CRL name")] ~allowed_roles:_R_LOCAL_ROOT_ONLY () let crl_list = - call ~in_oss_since:None ~in_product_since:rel_george ~pool_internal:true - ~hide_from_docs:true ~name:"crl_list" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "List the filenames of all installed TLS CA-issued Certificate \ + Revocation Lists." + ) + ] + ~pool_internal:true ~hide_from_docs:true ~name:"crl_list" ~doc: "List the filenames of all installed TLS CA-issued Certificate \ Revocation Lists." @@ -1130,8 +1557,16 @@ let crl_list = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let certificate_sync = - call ~in_oss_since:None ~in_product_since:rel_george ~pool_internal:true - ~hide_from_docs:true ~name:"certificate_sync" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "Make installed TLS CA certificates and CRLs available to all \ + programs using OpenSSL." + ) + ] + ~pool_internal:true ~hide_from_docs:true ~name:"certificate_sync" ~doc: "Make installed TLS CA certificates and CRLs available to all programs \ using OpenSSL." @@ -1273,7 +1708,13 @@ let operations = let enable_external_auth = call ~flags:[`Session] ~name:"enable_external_auth" ~in_oss_since:None - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "This call enables external authentication on a host" + ) + ] ~params: [ ( Ref _host @@ -1295,7 +1736,13 @@ let enable_external_auth = let disable_external_auth = call ~flags:[`Session] ~name:"disable_external_auth" ~in_oss_since:None - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "This call disables external authentication on the local host" + ) + ] ~versioned_params: [ { @@ -1320,7 +1767,15 @@ let disable_external_auth = let set_license_params = call ~name:"set_license_params" - ~in_product_since:rel_orlando (* actually update 3 aka floodgate *) + ~lifecycle: + [ + ( Published + , rel_orlando + , "Set the new license details in the database, trigger a \ + recomputation of the pool SKU" + ) + ] + (* actually update 3 aka floodgate *) ~doc: "Set the new license details in the database, trigger a recomputation of \ the pool SKU" @@ -1334,7 +1789,15 @@ let set_license_params = let apply_edition = call ~flags:[`Session] ~name:"apply_edition" - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Change to another edition, or reactivate the current edition after \ + a license has expired. This may be subject to the successful \ + checkout of an appropriate license." + ) + ] ~doc: "Change to another edition, or reactivate the current edition after a \ license has expired. 
This may be subject to the successful checkout of \ @@ -1373,7 +1836,6 @@ let set_power_on_mode = ; (Changed, rel_stockholm, "Removed iLO script") ; (Changed, "24.19.0", "Replaced DRAC mode with IPMI") ] - ~in_product_since:rel_midnight_ride ~doc:"Set the power-on-mode, host, user and password" ~params: [ @@ -1453,7 +1915,13 @@ let reset_networking = let enable_local_storage_caching = call ~flags:[`Session] ~name:"enable_local_storage_caching" - ~in_product_since:rel_cowley + ~lifecycle: + [ + ( Published + , rel_cowley + , "Enable the use of a local SR for caching purposes" + ) + ] ~doc:"Enable the use of a local SR for caching purposes" ~params: [ @@ -1464,13 +1932,20 @@ let enable_local_storage_caching = let disable_local_storage_caching = call ~flags:[`Session] ~name:"disable_local_storage_caching" - ~in_product_since:rel_cowley + ~lifecycle: + [ + ( Published + , rel_cowley + , "Disable the use of a local SR for caching purposes" + ) + ] ~doc:"Disable the use of a local SR for caching purposes" ~params:[(Ref _host, "host", "The host")] ~allowed_roles:_R_POOL_OP () let get_sm_diagnostics = - call ~flags:[`Session] ~name:"get_sm_diagnostics" ~in_product_since:rel_boston + call ~flags:[`Session] ~name:"get_sm_diagnostics" + ~lifecycle:[(Published, rel_boston, "Return live SM diagnostics")] ~doc:"Return live SM diagnostics" ~params:[(Ref _host, "host", "The host")] ~result:(String, "Printable diagnostic data") @@ -1478,13 +1953,21 @@ let get_sm_diagnostics = let get_thread_diagnostics = call ~flags:[`Session] ~name:"get_thread_diagnostics" - ~in_product_since:rel_boston ~doc:"Return live thread diagnostics" + ~lifecycle:[(Published, rel_boston, "Return live thread diagnostics")] + ~doc:"Return live thread diagnostics" ~params:[(Ref _host, "host", "The host")] ~result:(String, "Printable diagnostic data") ~allowed_roles:_R_POOL_OP ~hide_from_docs:true () let sm_dp_destroy = - call ~flags:[`Session] ~name:"sm_dp_destroy" ~in_product_since:rel_boston + call ~flags:[`Session] ~name:"sm_dp_destroy" + ~lifecycle: + [ + ( Published + , rel_boston + , "Attempt to cleanup and destroy a named SM datapath" + ) + ] ~doc:"Attempt to cleanup and destroy a named SM datapath" ~params: [ @@ -1751,7 +2234,8 @@ let emergency_reenable_tls_verification = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let apply_updates = - call ~name:"apply_updates" ~in_oss_since:None ~in_product_since:"1.301.0" + call ~name:"apply_updates" ~in_oss_since:None + ~lifecycle:[(Published, "1.301.0", "")] ~doc:"apply updates from current enabled repository on a host" ~params: [ @@ -1772,7 +2256,7 @@ let apply_updates = let copy_primary_host_certs = call ~name:"copy_primary_host_certs" ~in_oss_since:None - ~in_product_since:"1.307.0" + ~lifecycle:[(Published, "1.307.0", "")] ~doc:"useful for secondary hosts that are missing some certs" ~params: [ diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml index f55f98d47ac..d13858363ab 100644 --- a/ocaml/idl/datamodel_pool.ml +++ b/ocaml/idl/datamodel_pool.ml @@ -62,7 +62,9 @@ let telemetry_frequency = ) let enable_ha = - call ~in_product_since:rel_miami ~name:"enable_ha" ~in_oss_since:None + call + ~lifecycle:[(Published, rel_miami, "Turn on High Availability mode")] + ~name:"enable_ha" ~in_oss_since:None ~versioned_params: [ { @@ -85,19 +87,30 @@ let enable_ha = () let disable_ha = - call ~in_product_since:rel_miami ~name:"disable_ha" ~in_oss_since:None - ~params:[] ~doc:"Turn off High Availability mode" + call + ~lifecycle:[(Published, rel_miami, "Turn off High Availability 
mode")] + ~name:"disable_ha" ~in_oss_since:None ~params:[] + ~doc:"Turn off High Availability mode" ~allowed_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) () let sync_database = - call ~name:"sync_database" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"sync_database" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Forcibly synchronise the database now")] ~params:[] ~doc:"Forcibly synchronise the database now" ~allowed_roles:_R_POOL_OP () let designate_new_master = - call ~in_product_since:rel_miami ~name:"designate_new_master" - ~in_oss_since:None + call + ~lifecycle: + [ + ( Published + , rel_miami + , "Perform an orderly handover of the role of master to the referenced \ + host." + ) + ] + ~name:"designate_new_master" ~in_oss_since:None ~params:[(Ref _host, "host", "The host who should become the new master")] ~doc: "Perform an orderly handover of the role of master to the referenced \ @@ -105,7 +118,8 @@ let designate_new_master = ~allowed_roles:_R_POOL_OP () let join = - call ~name:"join" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"join" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Instruct host to join a new pool")] ~params: [ ( String @@ -125,7 +139,8 @@ let join = ~doc:"Instruct host to join a new pool" ~allowed_roles:_R_POOL_OP () let join_force = - call ~name:"join_force" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"join_force" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Instruct host to join a new pool")] ~params: [ ( String @@ -148,7 +163,7 @@ let certs = Map (String, String) let exchange_certificates_on_join = call ~name:"exchange_certificates_on_join" ~in_oss_since:None - ~in_product_since:"1.298.0" + ~lifecycle:[(Published, "1.298.0", "")] ~params: [ (String, "uuid", "The uuid of the joining host") @@ -162,7 +177,7 @@ let exchange_certificates_on_join = let exchange_ca_certificates_on_join = call ~name:"exchange_ca_certificates_on_join" ~in_oss_since:None - ~in_product_since:"1.320.0" + ~lifecycle:[(Published, "1.320.0", "")] ~params: [ (certs, "import", "The CA certificates that are to be installed") @@ -179,19 +194,41 @@ let exchange_ca_certificates_on_join = let slave_reset_master = call ~flags:[`Session] ~name:"emergency_reset_master" ~in_oss_since:None - ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "Instruct a slave already in a pool that the master has changed" + ) + ] ~params:[(String, "master_address", "The hostname of the master")] ~doc:"Instruct a slave already in a pool that the master has changed" ~allowed_roles:_R_POOL_OP () let transition_to_master = call ~flags:[`Session] ~name:"emergency_transition_to_master" - ~in_oss_since:None ~in_product_since:rel_rio ~params:[] + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Instruct host that's currently a slave to transition to being master" + ) + ] + ~params:[] ~doc:"Instruct host that's currently a slave to transition to being master" ~allowed_roles:_R_POOL_OP () let recover_slaves = - call ~name:"recover_slaves" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"recover_slaves" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Instruct a pool master, M, to try and contact its slaves and, if \ + slaves are in emergency mode, reset their master address to M." 
+ ) + ] ~params:[] ~result: ( Set (Ref _host) @@ -203,18 +240,29 @@ let recover_slaves = ~allowed_roles:_R_POOL_OP () let eject = - call ~name:"eject" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"eject" ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Instruct a pool master to eject a host from the pool" + ) + ] ~params:[(Ref _host, "host", "The host to eject")] ~doc:"Instruct a pool master to eject a host from the pool" ~allowed_roles:_R_POOL_OP () let initial_auth = - call ~name:"initial_auth" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"initial_auth" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Internal use only")] ~params:[] ~result:(SecretString, "") ~doc:"Internal use only" ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () let create_VLAN_from_PIF = - call ~in_oss_since:None ~in_product_since:rel_rio ~name:"create_VLAN_from_PIF" + call ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Create a pool-wide VLAN by taking the PIF.")] + ~name:"create_VLAN_from_PIF" ~doc:"Create a pool-wide VLAN by taking the PIF." ~params: [ @@ -236,7 +284,17 @@ let create_VLAN_from_PIF = (* !! THIS IS BROKEN; it takes a device name which in the case of a bond is not homogeneous across all pool hosts. See CA-22613. !! *) let create_VLAN = - call ~in_oss_since:None ~in_product_since:rel_rio ~name:"create_VLAN" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Create PIFs, mapping a network to the same physical interface/VLAN \ + on each host. This call is deprecated: use \ + Pool.create_VLAN_from_PIF instead." + ) + ] + ~name:"create_VLAN" ~doc: "Create PIFs, mapping a network to the same physical interface/VLAN on \ each host. This call is deprecated: use Pool.create_VLAN_from_PIF \ @@ -259,7 +317,14 @@ let create_VLAN = let management_reconfigure = call ~name:"management_reconfigure" ~in_oss_since:None - ~in_product_since:rel_inverness + ~lifecycle: + [ + ( Published + , rel_inverness + , "Reconfigure the management network interface for all Hosts in the \ + Pool" + ) + ] ~params:[(Ref _network, "network", "The network")] ~doc: "Reconfigure the management network interface for all Hosts in the Pool" @@ -281,14 +346,15 @@ let hello_return = ) let hello = - call ~name:"hello" ~in_oss_since:None ~in_product_since:rel_rio + call ~name:"hello" ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Internal use only")] ~params:[(String, "host_uuid", ""); (String, "host_address", "")] ~result:(hello_return, "") ~doc:"Internal use only" ~hide_from_docs:true ~allowed_roles:_R_POOL_OP () let ping_slave = call ~flags:[`Session] ~name:"is_slave" ~in_oss_since:None - ~in_product_since:rel_rio + ~lifecycle:[(Published, rel_rio, "Internal use only")] ~params:[(Ref _host, "host", "")] ~doc:"Internal use only" ~result: @@ -300,7 +366,15 @@ let ping_slave = let ha_prevent_restarts_for = call ~flags:[`Session] ~name:"ha_prevent_restarts_for" - ~in_product_since:rel_orlando_update_1 + ~lifecycle: + [ + ( Published + , rel_orlando_update_1 + , "When this call returns the VM restart logic will not run for the \ + requested number of seconds. If the argument is zero then the \ + restart thread is immediately unblocked" + ) + ] ~doc: "When this call returns the VM restart logic will not run for the \ requested number of seconds. 
If the argument is zero then the restart \ @@ -313,7 +387,14 @@ let ha_prevent_restarts_for = let ha_failover_plan_exists = call ~flags:[`Session] ~name:"ha_failover_plan_exists" - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Returns true if a VM failover plan exists for up to 'n' host \ + failures" + ) + ] ~doc:"Returns true if a VM failover plan exists for up to 'n' host failures" ~params:[(Int, "n", "The number of host failures to plan for")] ~result: @@ -325,7 +406,14 @@ let ha_failover_plan_exists = let ha_compute_max_host_failures_to_tolerate = call ~flags:[`Session] ~name:"ha_compute_max_host_failures_to_tolerate" - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Returns the maximum number of host failures we could tolerate \ + before we would be unable to restart configured VMs" + ) + ] ~doc: "Returns the maximum number of host failures we could tolerate before we \ would be unable to restart configured VMs" @@ -340,7 +428,14 @@ let ha_compute_max_host_failures_to_tolerate = let ha_compute_hypothetical_max_host_failures_to_tolerate = call ~flags:[`Session] ~name:"ha_compute_hypothetical_max_host_failures_to_tolerate" - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Returns the maximum number of host failures we could tolerate \ + before we would be unable to restart the provided VMs" + ) + ] ~doc: "Returns the maximum number of host failures we could tolerate before we \ would be unable to restart the provided VMs" @@ -360,7 +455,13 @@ let ha_compute_hypothetical_max_host_failures_to_tolerate = let ha_compute_vm_failover_plan = call ~flags:[`Session] ~name:"ha_compute_vm_failover_plan" - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Return a VM failover plan assuming a given subset of hosts fail" + ) + ] ~doc:"Return a VM failover plan assuming a given subset of hosts fail" ~params: [ @@ -377,7 +478,15 @@ let ha_compute_vm_failover_plan = ~allowed_roles:_R_POOL_OP () let create_new_blob = - call ~name:"create_new_blob" ~in_product_since:rel_orlando + call ~name:"create_new_blob" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Create a placeholder for a named binary blob of data that is \ + associated with this pool" + ) + ] ~doc: "Create a placeholder for a named binary blob of data that is associated \ with this pool" @@ -419,7 +528,15 @@ let create_new_blob = ~allowed_roles:_R_POOL_OP () let set_ha_host_failures_to_tolerate = - call ~name:"set_ha_host_failures_to_tolerate" ~in_product_since:rel_orlando + call ~name:"set_ha_host_failures_to_tolerate" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Set the maximum number of host failures to consider in the HA VM \ + restart planner" + ) + ] ~doc: "Set the maximum number of host failures to consider in the HA VM \ restart planner" @@ -431,13 +548,29 @@ let set_ha_host_failures_to_tolerate = ~allowed_roles:_R_POOL_OP () let ha_schedule_plan_recomputation = - call ~name:"ha_schedule_plan_recomputation" ~in_product_since:rel_orlando + call ~name:"ha_schedule_plan_recomputation" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Signal that the plan should be recomputed (eg a host has come \ + online)" + ) + ] ~doc:"Signal that the plan should be recomputed (eg a host has come online)" ~params:[] ~hide_from_docs:true ~pool_internal:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () let enable_binary_storage = - call ~name:"enable_binary_storage" ~in_product_since:rel_orlando + call 
~name:"enable_binary_storage" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Enable the storage of larger objects, such as RRDs, messages and \ + binary blobs across all hosts in the pool" + ) + ] ~hide_from_docs:true ~doc: "Enable the storage of larger objects, such as RRDs, messages and binary \ @@ -445,7 +578,16 @@ let enable_binary_storage = ~params:[] ~allowed_roles:_R_POOL_OP () let disable_binary_storage = - call ~name:"disable_binary_storage" ~in_product_since:rel_orlando + call ~name:"disable_binary_storage" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Disable the storage of larger objects, such as RRDs, messages and \ + binary blobs across all hosts in the pool. This will destroy all of \ + these objects where they exist." + ) + ] ~hide_from_docs:true ~doc: "Disable the storage of larger objects, such as RRDs, messages and \ @@ -455,7 +597,14 @@ let disable_binary_storage = let enable_external_auth = call ~flags:[`Session] ~name:"enable_external_auth" ~in_oss_since:None - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "This call enables external authentication on all the hosts of the \ + pool" + ) + ] ~params: [ ( Ref _pool @@ -478,7 +627,14 @@ let enable_external_auth = let disable_external_auth = call ~flags:[`Session] ~name:"disable_external_auth" ~in_oss_since:None - ~in_product_since:rel_george + ~lifecycle: + [ + ( Published + , rel_george + , "This call disables external authentication on all the hosts of the \ + pool" + ) + ] ~versioned_params: [ { @@ -504,7 +660,16 @@ let disable_external_auth = let detect_nonhomogeneous_external_auth = call ~flags:[`Session] ~name:"detect_nonhomogeneous_external_auth" - ~in_oss_since:None ~in_product_since:rel_george + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "This call asynchronously detects if the external authentication \ + configuration in any slave is different from that in the master and \ + raises appropriate alerts" + ) + ] ~params: [ ( Ref _pool @@ -520,7 +685,15 @@ let detect_nonhomogeneous_external_auth = ~allowed_roles:_R_POOL_OP () let initialize_wlb = - call ~name:"initialize_wlb" ~in_product_since:rel_george + call ~name:"initialize_wlb" + ~lifecycle: + [ + ( Published + , rel_george + , "Initializes workload balancing monitoring on this pool with the \ + specified wlb server" + ) + ] ~doc: "Initializes workload balancing monitoring on this pool with the \ specified wlb server" @@ -552,12 +725,27 @@ let initialize_wlb = ~allowed_roles:_R_POOL_OP () let deconfigure_wlb = - call ~name:"deconfigure_wlb" ~in_product_since:rel_george + call ~name:"deconfigure_wlb" + ~lifecycle: + [ + ( Published + , rel_george + , "Permanently deconfigures workload balancing monitoring on this pool" + ) + ] ~doc:"Permanently deconfigures workload balancing monitoring on this pool" ~params:[] ~allowed_roles:_R_POOL_OP () let send_wlb_configuration = - call ~name:"send_wlb_configuration" ~in_product_since:rel_george + call ~name:"send_wlb_configuration" + ~lifecycle: + [ + ( Published + , rel_george + , "Sets the pool optimization criteria for the workload balancing \ + server" + ) + ] ~doc:"Sets the pool optimization criteria for the workload balancing server" ~params: [ @@ -569,7 +757,15 @@ let send_wlb_configuration = ~allowed_roles:_R_POOL_OP () let retrieve_wlb_configuration = - call ~name:"retrieve_wlb_configuration" ~in_product_since:rel_george + call ~name:"retrieve_wlb_configuration" + ~lifecycle: + [ + ( Published + , rel_george + , "Retrieves the pool 
optimization criteria from the workload \ + balancing server" + ) + ] ~doc: "Retrieves the pool optimization criteria from the workload balancing \ server" @@ -579,7 +775,15 @@ let retrieve_wlb_configuration = ~allowed_roles:_R_READ_ONLY () let retrieve_wlb_recommendations = - call ~name:"retrieve_wlb_recommendations" ~in_product_since:rel_george + call ~name:"retrieve_wlb_recommendations" + ~lifecycle: + [ + ( Published + , rel_george + , "Retrieves vm migrate recommendations for the pool from the workload \ + balancing server" + ) + ] ~doc: "Retrieves vm migrate recommendations for the pool from the workload \ balancing server" @@ -589,7 +793,15 @@ let retrieve_wlb_recommendations = ~allowed_roles:_R_READ_ONLY () let send_test_post = - call ~name:"send_test_post" ~in_product_since:rel_george + call ~name:"send_test_post" + ~lifecycle: + [ + ( Published + , rel_george + , "Send the given body to the given host and port, using HTTPS, and \ + print the response. This is used for debugging the SSL layer." + ) + ] ~doc: "Send the given body to the given host and port, using HTTPS, and print \ the response. This is used for debugging the SSL layer." @@ -657,20 +869,45 @@ let certificate_list = () let crl_install = - call ~in_oss_since:None ~in_product_since:rel_george ~name:"crl_install" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "Install a TLS CA-issued Certificate Revocation List, pool-wide." + ) + ] + ~name:"crl_install" ~doc:"Install a TLS CA-issued Certificate Revocation List, pool-wide." ~params: [(String, "name", "A name to give the CRL"); (String, "cert", "The CRL")] ~allowed_roles:_R_POOL_OP () let crl_uninstall = - call ~in_oss_since:None ~in_product_since:rel_george ~name:"crl_uninstall" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "Remove a pool-wide TLS CA-issued Certificate Revocation List." + ) + ] + ~name:"crl_uninstall" ~doc:"Remove a pool-wide TLS CA-issued Certificate Revocation List." ~params:[(String, "name", "The CRL name")] ~allowed_roles:_R_POOL_OP () let crl_list = - call ~in_oss_since:None ~in_product_since:rel_george ~name:"crl_list" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "List the names of all installed TLS CA-issued Certificate \ + Revocation Lists." + ) + ] + ~name:"crl_list" ~doc: "List the names of all installed TLS CA-issued Certificate Revocation \ Lists." @@ -678,7 +915,15 @@ let crl_list = ~allowed_roles:_R_POOL_OP () let certificate_sync = - call ~in_oss_since:None ~in_product_since:rel_george ~name:"certificate_sync" + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_george + , "Copy the TLS CA certificates and CRLs of the master to all slaves." + ) + ] + ~name:"certificate_sync" ~doc:"Copy the TLS CA certificates and CRLs of the master to all slaves." ~allowed_roles:_R_POOL_OP () @@ -690,7 +935,15 @@ let enable_tls_verification = ~allowed_roles:_R_POOL_ADMIN () let enable_redo_log = - call ~in_oss_since:None ~in_product_since:rel_midnight_ride + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Enable the redo log on the given SR and start using it, unless HA \ + is enabled." 
+ ) + ] ~name:"enable_redo_log" ~params:[(Ref _sr, "sr", "SR to hold the redo log.")] ~doc: @@ -699,20 +952,34 @@ let enable_redo_log = ~allowed_roles:_R_POOL_OP () let disable_redo_log = - call ~in_oss_since:None ~in_product_since:rel_midnight_ride + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Disable the redo log if in use, unless HA is enabled." + ) + ] ~name:"disable_redo_log" ~doc:"Disable the redo log if in use, unless HA is enabled." ~allowed_roles:_R_POOL_OP () let audit_log_append = call ~in_oss_since:None ~pool_internal:true ~hide_from_docs:true - ~in_product_since:rel_midnight_ride ~name:"audit_log_append" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Append a line to the audit log on the master." + ) + ] + ~name:"audit_log_append" ~params:[(String, "line", "line to be appended to the audit log")] ~doc:"Append a line to the audit log on the master." ~allowed_roles:_R_POOL_ADMIN () let set_vswitch_controller = - call ~in_oss_since:None ~in_product_since:rel_midnight_ride + call ~in_oss_since:None ~lifecycle: [ ( Published @@ -736,7 +1003,8 @@ let set_vswitch_controller = let test_archive_target = call ~flags:[`Session] ~name:"test_archive_target" ~in_oss_since:None - ~in_product_since:rel_cowley + ~lifecycle: + [(Published, rel_cowley, "This call tests if a location is valid")] ~params: [ (Ref _pool, "self", "Reference to the pool") @@ -748,21 +1016,39 @@ let test_archive_target = let enable_local_storage_caching = call ~name:"enable_local_storage_caching" ~in_oss_since:None - ~in_product_since:rel_cowley + ~lifecycle: + [ + ( Published + , rel_cowley + , "This call attempts to enable pool-wide local storage caching" + ) + ] ~params:[(Ref _pool, "self", "Reference to the pool")] ~doc:"This call attempts to enable pool-wide local storage caching" ~allowed_roles:_R_POOL_OP () let disable_local_storage_caching = call ~name:"disable_local_storage_caching" ~in_oss_since:None - ~in_product_since:rel_cowley + ~lifecycle: + [ + ( Published + , rel_cowley + , "This call disables pool-wide local storage caching" + ) + ] ~params:[(Ref _pool, "self", "Reference to the pool")] ~doc:"This call disables pool-wide local storage caching" ~allowed_roles:_R_POOL_OP () let get_license_state = call ~name:"get_license_state" ~in_oss_since:None - ~in_product_since:rel_clearwater + ~lifecycle: + [ + ( Published + , rel_clearwater + , "This call returns the license state for the pool" + ) + ] ~params:[(Ref _pool, "self", "Reference to the pool")] ~doc:"This call returns the license state for the pool" ~allowed_roles:_R_READ_ONLY @@ -770,7 +1056,9 @@ let get_license_state = () let apply_edition = - call ~name:"apply_edition" ~in_oss_since:None ~in_product_since:rel_clearwater + call ~name:"apply_edition" ~in_oss_since:None + ~lifecycle: + [(Published, rel_clearwater, "Apply an edition to all hosts in the pool")] ~params: [ (Ref _pool, "self", "Reference to the pool") @@ -825,7 +1113,14 @@ let set_igmp_snooping_enabled = ~allowed_roles:_R_POOL_OP () let has_extension = - call ~name:"has_extension" ~in_product_since:rel_dundee + call ~name:"has_extension" + ~lifecycle: + [ + ( Published + , rel_dundee + , "Return true if the extension is available on the pool" + ) + ] ~doc:"Return true if the extension is available on the pool" ~params: [ @@ -836,7 +1131,14 @@ let has_extension = ~allowed_roles:_R_POOL_ADMIN () let add_to_guest_agent_config = - call ~name:"add_to_guest_agent_config" ~in_product_since:rel_dundee + call ~name:"add_to_guest_agent_config" + 
~lifecycle: + [ + ( Published + , rel_dundee + , "Add a key-value pair to the pool-wide guest agent configuration" + ) + ] ~doc:"Add a key-value pair to the pool-wide guest agent configuration" ~params: [ @@ -847,14 +1149,23 @@ let add_to_guest_agent_config = ~allowed_roles:_R_POOL_ADMIN () let remove_from_guest_agent_config = - call ~name:"remove_from_guest_agent_config" ~in_product_since:rel_dundee + call ~name:"remove_from_guest_agent_config" + ~lifecycle: + [ + ( Published + , rel_dundee + , "Remove a key-value pair from the pool-wide guest agent configuration" + ) + ] ~doc:"Remove a key-value pair from the pool-wide guest agent configuration" ~params: [(Ref _pool, "self", "The pool"); (String, "key", "The key to remove")] ~allowed_roles:_R_POOL_ADMIN () let rotate_secret = - call ~in_product_since:rel_stockholm_psr ~name:"rotate_secret" ~params:[] + call + ~lifecycle:[(Published, rel_stockholm_psr, "")] + ~name:"rotate_secret" ~params:[] ~errs: [ Api_errors.internal_error @@ -866,7 +1177,8 @@ let rotate_secret = ~allowed_roles:_R_POOL_ADMIN () let set_repositories = - call ~name:"set_repositories" ~in_product_since:"1.301.0" + call ~name:"set_repositories" + ~lifecycle:[(Published, "1.301.0", "")] ~doc:"Set enabled set of repositories" ~params: [ @@ -877,7 +1189,8 @@ let set_repositories = () let add_repository = - call ~name:"add_repository" ~in_product_since:"1.301.0" + call ~name:"add_repository" + ~lifecycle:[(Published, "1.301.0", "")] ~doc:"Add a repository to the enabled set" ~params: [ @@ -891,7 +1204,8 @@ let add_repository = () let remove_repository = - call ~name:"remove_repository" ~in_product_since:"1.301.0" + call ~name:"remove_repository" + ~lifecycle:[(Published, "1.301.0", "")] ~doc:"Remove a repository from the enabled set" ~params: [ @@ -902,7 +1216,8 @@ let remove_repository = () let sync_updates = - call ~name:"sync_updates" ~in_product_since:"1.329.0" + call ~name:"sync_updates" + ~lifecycle:[(Published, "1.329.0", "")] ~doc:"Sync with the enabled repository" ~versioned_params: [ @@ -1003,7 +1318,8 @@ let disable_client_certificate_auth = () let configure_repository_proxy = - call ~name:"configure_repository_proxy" ~in_product_since:"21.3.0" + call ~name:"configure_repository_proxy" + ~lifecycle:[(Published, "21.3.0", "")] ~doc:"Configure proxy for RPM package repositories." ~params: [ @@ -1022,7 +1338,8 @@ let configure_repository_proxy = () let disable_repository_proxy = - call ~name:"disable_repository_proxy" ~in_product_since:"21.4.0" + call ~name:"disable_repository_proxy" + ~lifecycle:[(Published, "21.4.0", "")] ~doc:"Disable the proxy for RPM package repositories." 
~params:[(Ref _pool, "self", "The pool")] ~allowed_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index 377d4c0f667..8a5120ca679 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -155,7 +155,8 @@ let actions = let set_actions_after_crash = call ~name:"set_actions_after_crash" ~in_oss_since:None - ~in_product_since:rel_rio ~doc:"Sets the actions_after_crash parameter" + ~lifecycle:[(Published, rel_rio, "Sets the actions_after_crash parameter")] + ~doc:"Sets the actions_after_crash parameter" ~params: [ (Ref _vm, "self", "The VM to set") @@ -197,7 +198,8 @@ let get_boot_record = ~allowed_roles:_R_READ_ONLY () let get_data_sources = - call ~name:"get_data_sources" ~in_oss_since:None ~in_product_since:rel_orlando + call ~name:"get_data_sources" ~in_oss_since:None + ~lifecycle:[(Published, rel_orlando, "")] ~doc:"" ~result:(Set (Record _data_source), "A set of data sources") ~params:[(Ref _vm, "self", "The VM to interrogate")] @@ -205,7 +207,8 @@ let get_data_sources = let record_data_source = call ~name:"record_data_source" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [(Published, rel_orlando, "Start recording the specified data source")] ~doc:"Start recording the specified data source" ~params: [ @@ -216,7 +219,13 @@ let record_data_source = let query_data_source = call ~name:"query_data_source" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Query the latest value of the specified data source" + ) + ] ~doc:"Query the latest value of the specified data source" ~params: [ @@ -228,7 +237,13 @@ let query_data_source = let forget_data_source_archives = call ~name:"forget_data_source_archives" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Forget the recorded statistics related to the specified data source" + ) + ] ~doc:"Forget the recorded statistics related to the specified data source" ~params: [ @@ -242,14 +257,24 @@ let forget_data_source_archives = let set_ha_always_run = call ~name:"set_ha_always_run" ~in_oss_since:None - ~in_product_since:rel_orlando ~doc:"Set the value of the ha_always_run" + ~lifecycle: + [ + (Published, rel_orlando, "Set the value of the ha_always_run") + ; (Deprecated, rel_boston, "") + ] + ~doc:"Set the value of the ha_always_run" ~params:[(Ref _vm, "self", "The VM"); (Bool, "value", "The value")] - ~flags:[`Session] ~allowed_roles:_R_POOL_OP - ~internal_deprecated_since:rel_boston () + ~flags:[`Session] ~allowed_roles:_R_POOL_OP () let set_ha_restart_priority = call ~name:"set_ha_restart_priority" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Set the value of the ha_restart_priority field" + ) + ] ~doc:"Set the value of the ha_restart_priority field" ~params:[(Ref _vm, "self", "The VM"); (String, "value", "The value")] ~flags:[`Session] ~allowed_roles:_R_POOL_OP () @@ -257,7 +282,17 @@ let set_ha_restart_priority = (* VM.Clone *) let clone = - call ~name:"clone" ~in_product_since:rel_rio + call ~name:"clone" + ~lifecycle: + [ + ( Published + , rel_rio + , "Clones the specified VM, making a new VM. Clone automatically \ + exploits the capabilities of the underlying storage repository in \ + which the VM's disk images are stored (e.g. Copy on Write). This \ + function can only be called when the VM is in the Halted State." + ) + ] ~doc: "Clones the specified VM, making a new VM. 
Clone automatically exploits \ the capabilities of the underlying storage repository in which the VM's \ @@ -346,8 +381,10 @@ let snapshot_with_quiesce = ~allowed_roles:_R_VM_POWER_ADMIN () let update_snapshot_metadata = - call ~name:"update_snapshot_metadata" ~in_product_since:rel_george - ~internal_deprecated_since:rel_midnight_ride ~doc:"" ~hide_from_docs:true + call ~name:"update_snapshot_metadata" + ~lifecycle: + [(Published, rel_george, ""); (Deprecated, rel_midnight_ride, "")] + ~doc:"" ~hide_from_docs:true ~params: [ (Ref _vm, "vm", "The VM to update") @@ -362,7 +399,16 @@ let update_snapshot_metadata = ~allowed_roles:_R_POOL_OP () let snapshot = - call ~name:"snapshot" ~in_product_since:rel_orlando + call ~name:"snapshot" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Snapshots the specified VM, making a new VM. Snapshot automatically \ + exploits the capabilities of the underlying storage repository in \ + which the VM's disk images are stored (e.g. Copy on Write)." + ) + ] ~doc: "Snapshots the specified VM, making a new VM. Snapshot automatically \ exploits the capabilities of the underlying storage repository in which \ @@ -401,7 +447,14 @@ let snapshot = ~allowed_roles:_R_VM_POWER_ADMIN ~doc_tags:[Snapshots] () let revert = - call ~name:"revert" ~in_product_since:rel_midnight_ride + call ~name:"revert" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Reverts the specified VM to a previous state." + ) + ] ~doc:"Reverts the specified VM to a previous state." ~params:[(Ref _vm, "snapshot", "The snapshotted state that we revert to")] ~errs: @@ -414,7 +467,17 @@ let revert = ~allowed_roles:_R_VM_POWER_ADMIN ~doc_tags:[Snapshots] () let checkpoint = - call ~name:"checkpoint" ~in_product_since:rel_midnight_ride + call ~name:"checkpoint" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Checkpoints the specified VM, making a new VM. Checkpoint \ + automatically exploits the capabilities of the underlying storage \ + repository in which the VM's disk images are stored (e.g. Copy on \ + Write) and saves the memory image as well." + ) + ] ~doc: "Checkpoints the specified VM, making a new VM. Checkpoint automatically \ exploits the capabilities of the underlying storage repository in which \ @@ -438,8 +501,14 @@ let checkpoint = let create_template = call ~name:"create_template" ~hide_from_docs:true - ~internal_deprecated_since:rel_midnight_ride - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Deprecated: use VM.clone or VM.copy instead." + ) + ; (Deprecated, rel_midnight_ride, "") + ] ~doc:"Deprecated: use VM.clone or VM.copy instead." ~result:(Ref _vm, "") ~params:[(Ref _vm, "vm", ""); (String, "new_name", "")] ~errs:[] ~allowed_roles:_R_VM_ADMIN () @@ -456,7 +525,8 @@ let set_is_default_template = ~errs:[] ~allowed_roles:_R_POOL_ADMIN () let import_convert = - call ~name:"import_convert" ~in_product_since:rel_tampa + call ~name:"import_convert" + ~lifecycle:[(Published, rel_tampa, "Import using a conversion service.")] ~doc:"Import using a conversion service." ~params: [ @@ -477,13 +547,30 @@ let provision = creates VDIs and VBDs and then executes any applicable post-install \ script." 
~params:[(Ref _vm, "vm", "The VM to be provisioned")] - ~in_oss_since:None ~in_product_since:rel_rio ~errs:(errnames_of_call clone) - ~allowed_roles:_R_VM_ADMIN () + ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Inspects the disk configuration contained within the VM's \ + other_config, creates VDIs and VBDs and then executes any \ + applicable post-install script." + ) + ] + ~errs:(errnames_of_call clone) ~allowed_roles:_R_VM_ADMIN () (* VM.Start *) let start = - call ~name:"start" ~in_product_since:rel_rio + call ~name:"start" + ~lifecycle: + [ + ( Published + , rel_rio + , "Start the specified VM. This function can only be called with the \ + VM is in the Halted State." + ) + ] ~doc: "Start the specified VM. This function can only be called with the VM \ is in the Halted State." @@ -513,7 +600,7 @@ let start = ~allowed_roles:_R_VM_OP () let assert_can_boot_here = - call ~name:"assert_can_boot_here" ~in_product_since:rel_rio + call ~name:"assert_can_boot_here" ~lifecycle: [ (Published, rel_rio, "") @@ -553,7 +640,15 @@ let assert_can_boot_here = ~doc_tags:[Memory] () let assert_agile = - call ~name:"assert_agile" ~in_product_since:rel_orlando + call ~name:"assert_agile" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Returns an error if the VM is not considered agile e.g. because it \ + is tied to a resource local to a host" + ) + ] ~doc: "Returns an error if the VM is not considered agile e.g. because it is \ tied to a resource local to a host" @@ -561,14 +656,30 @@ let assert_agile = ~allowed_roles:_R_READ_ONLY () let get_possible_hosts = - call ~name:"get_possible_hosts" ~in_product_since:rel_rio + call ~name:"get_possible_hosts" + ~lifecycle: + [ + ( Published + , rel_rio + , "Return the list of hosts on which this VM may run." + ) + ] ~doc:"Return the list of hosts on which this VM may run." ~params:[(Ref _vm, "vm", "The VM")] ~result:(Set (Ref _host), "The possible hosts") ~allowed_roles:_R_READ_ONLY () let retrieve_wlb_recommendations = - call ~name:"retrieve_wlb_recommendations" ~in_product_since:rel_george + call ~name:"retrieve_wlb_recommendations" + ~lifecycle: + [ + ( Published + , rel_george + , "Returns mapping of hosts to ratings, indicating the suitability of \ + starting the VM at that location according to wlb. Rating is \ + replaced with an error if the VM cannot boot there." + ) + ] ~doc: "Returns mapping of hosts to ratings, indicating the suitability of \ starting the VM at that location according to wlb. Rating is replaced \ @@ -581,7 +692,19 @@ let retrieve_wlb_recommendations = ~allowed_roles:_R_READ_ONLY () let maximise_memory = - call ~in_product_since:rel_miami ~name:"maximise_memory" + call + ~lifecycle: + [ + ( Published + , rel_miami + , "Returns the maximum amount of guest memory which will fit, together \ + with overheads, in the supplied amount of physical memory. If \ + 'exact' is true then an exact calculation is performed using the \ + VM's current settings. If 'exact' is false then a more conservative \ + approximation is used" + ) + ] + ~name:"maximise_memory" ~doc: "Returns the maximum amount of guest memory which will fit, together \ with overheads, in the supplied amount of physical memory. 
If 'exact' \ @@ -603,7 +726,15 @@ let maximise_memory = ~allowed_roles:_R_READ_ONLY ~doc_tags:[Memory] () let get_allowed_VBD_devices = - call ~flags:[`Session] ~no_current_operations:true ~in_product_since:rel_rio + call ~flags:[`Session] ~no_current_operations:true + ~lifecycle: + [ + ( Published + , rel_rio + , "Returns a list of the allowed values that a VBD device field can \ + take" + ) + ] ~name:"get_allowed_VBD_devices" ~doc:"Returns a list of the allowed values that a VBD device field can take" ~params:[(Ref _vm, "vm", "The VM to query")] @@ -611,7 +742,15 @@ let get_allowed_VBD_devices = ~allowed_roles:_R_READ_ONLY () let get_allowed_VIF_devices = - call ~flags:[`Session] ~no_current_operations:true ~in_product_since:rel_rio + call ~flags:[`Session] ~no_current_operations:true + ~lifecycle: + [ + ( Published + , rel_rio + , "Returns a list of the allowed values that a VIF device field can \ + take" + ) + ] ~name:"get_allowed_VIF_devices" ~doc:"Returns a list of the allowed values that a VIF device field can take" ~params:[(Ref _vm, "vm", "The VM to query")] @@ -622,8 +761,10 @@ let get_allowed_VIF_devices = (* an internal call that sets resident_on and clears the scheduled_to_be_resident_on atomically *) let atomic_set_resident_on = - call ~in_product_since:rel_rio ~pool_internal:true ~hide_from_docs:true - ~name:"atomic_set_resident_on" ~doc:"" + call + ~lifecycle:[(Published, rel_rio, "")] + ~pool_internal:true ~hide_from_docs:true ~name:"atomic_set_resident_on" + ~doc:"" ~params: [ (Ref _vm, "vm", "The VM to modify") @@ -632,7 +773,15 @@ let atomic_set_resident_on = ~allowed_roles:_R_LOCAL_ROOT_ONLY () let compute_memory_overhead = - call ~in_product_since:rel_midnight_ride ~name:"compute_memory_overhead" + call + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Computes the virtualization memory overhead of a VM." + ) + ] + ~name:"compute_memory_overhead" ~doc:"Computes the virtualization memory overhead of a VM." ~params:[(Ref _vm, "vm", "The VM for which to compute the memory overhead")] ~pool_internal:false ~hide_from_docs:false @@ -640,7 +789,14 @@ let compute_memory_overhead = ~allowed_roles:_R_READ_ONLY ~doc_tags:[Memory] () let set_memory_dynamic_max = - call ~flags:[`Session] ~in_product_since:rel_midnight_ride + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the value of the memory_dynamic_max field" + ) + ] ~name:"set_memory_dynamic_max" ~doc:"Set the value of the memory_dynamic_max field" ~params: @@ -651,7 +807,14 @@ let set_memory_dynamic_max = ~allowed_roles:_R_VM_POWER_ADMIN ~errs:[] ~doc_tags:[Memory] () let set_memory_dynamic_min = - call ~flags:[`Session] ~in_product_since:rel_midnight_ride + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the value of the memory_dynamic_min field" + ) + ] ~name:"set_memory_dynamic_min" ~doc:"Set the value of the memory_dynamic_min field" ~params: @@ -662,7 +825,15 @@ let set_memory_dynamic_min = ~allowed_roles:_R_VM_POWER_ADMIN ~errs:[] ~doc_tags:[Memory] () let set_memory_dynamic_range = - call ~name:"set_memory_dynamic_range" ~in_product_since:rel_midnight_ride + call ~name:"set_memory_dynamic_range" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the minimum and maximum amounts of physical memory the VM is \ + allowed to use." + ) + ] ~doc: "Set the minimum and maximum amounts of physical memory the VM is \ allowed to use." 
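For readers skimming the long run of hunks above: each one applies the same mechanical rewrite, replacing the bare `~in_product_since:<release>` argument of `call` with an explicit `~lifecycle` list of `(transition, release, description)` entries. The following before/after view is condensed from the `set_memory_dynamic_max` hunk above purely for illustration; it is an excerpt of the datamodel DSL, not a snippet that compiles on its own.

```
(* Before: only the publication release is recorded, as a bare argument. *)
let set_memory_dynamic_max =
  call ~flags:[`Session] ~in_product_since:rel_midnight_ride
    ~name:"set_memory_dynamic_max"
    ~doc:"Set the value of the memory_dynamic_max field"
    (* ... params and allowed roles unchanged, omitted here ... *)

(* After: the same fact becomes the first entry of an explicit lifecycle,
   a list of (transition, release, description) triples. *)
let set_memory_dynamic_max =
  call ~flags:[`Session]
    ~lifecycle:
      [
        ( Published
        , rel_midnight_ride
        , "Set the value of the memory_dynamic_max field"
        )
      ]
    ~name:"set_memory_dynamic_max"
    ~doc:"Set the value of the memory_dynamic_max field"
    (* ... params and allowed roles unchanged, omitted here ... *)
```

The description string of the `Published` entry is simply the call's existing documentation string, which is why the same text appears twice in most of these hunks.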
@@ -678,7 +849,9 @@ let set_memory_dynamic_range = (* When HA is enabled we need to prevent memory *) (* changes which will break the recovery plan. *) let set_memory_static_max = - call ~flags:[`Session] ~in_product_since:rel_orlando + call ~flags:[`Session] + ~lifecycle: + [(Published, rel_orlando, "Set the value of the memory_static_max field")] ~name:"set_memory_static_max" ~doc:"Set the value of the memory_static_max field" ~errs:[Api_errors.ha_operation_would_break_failover_plan] @@ -691,7 +864,14 @@ let set_memory_static_max = ~doc_tags:[Memory] () let set_memory_static_min = - call ~flags:[`Session] ~in_product_since:rel_midnight_ride + call ~flags:[`Session] + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the value of the memory_static_min field" + ) + ] ~name:"set_memory_static_min" ~doc:"Set the value of the memory_static_min field" ~errs:[] ~allowed_roles:_R_VM_POWER_ADMIN @@ -703,7 +883,15 @@ let set_memory_static_min = ~doc_tags:[Memory] () let set_memory_static_range = - call ~name:"set_memory_static_range" ~in_product_since:rel_midnight_ride + call ~name:"set_memory_static_range" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the static (ie boot-time) range of virtual memory that the VM \ + is allowed to use." + ) + ] ~doc: "Set the static (ie boot-time) range of virtual memory that the VM is \ allowed to use." @@ -717,7 +905,9 @@ let set_memory_static_range = ~doc_tags:[Memory] () let set_memory_limits = - call ~name:"set_memory_limits" ~in_product_since:rel_midnight_ride + call ~name:"set_memory_limits" + ~lifecycle: + [(Published, rel_midnight_ride, "Set the memory limits of this VM.")] ~doc:"Set the memory limits of this VM." ~allowed_roles:_R_VM_POWER_ADMIN ~params: [ @@ -730,7 +920,16 @@ let set_memory_limits = ~doc_tags:[Memory] () let set_memory = - call ~name:"set_memory" ~in_product_since:rel_ely + call ~name:"set_memory" + ~lifecycle: + [ + ( Published + , rel_ely + , "Set the memory allocation of this VM. Sets all of \ + memory_static_max, memory_dynamic_min, and memory_dynamic_max to \ + the given value, and leaves memory_static_min untouched." + ) + ] ~doc: "Set the memory allocation of this VM. 
Sets all of memory_static_max, \ memory_dynamic_min, and memory_dynamic_max to the given value, and \ @@ -744,8 +943,12 @@ let set_memory = ~doc_tags:[Memory] () let set_memory_target_live = - call ~name:"set_memory_target_live" ~in_product_since:rel_rio - ~internal_deprecated_since:rel_midnight_ride + call ~name:"set_memory_target_live" + ~lifecycle: + [ + (Published, rel_rio, "Set the memory target for a running VM") + ; (Deprecated, rel_midnight_ride, "") + ] ~doc:"Set the memory target for a running VM" ~allowed_roles:_R_VM_POWER_ADMIN ~params: @@ -753,16 +956,31 @@ let set_memory_target_live = ~doc_tags:[Memory] () let wait_memory_target_live = - call ~name:"wait_memory_target_live" ~in_product_since:rel_orlando - ~internal_deprecated_since:rel_midnight_ride + call ~name:"wait_memory_target_live" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Wait for a running VM to reach its current memory target" + ) + ; (Deprecated, rel_midnight_ride, "") + ] ~doc:"Wait for a running VM to reach its current memory target" ~allowed_roles:_R_READ_ONLY ~params:[(Ref _vm, "self", "The VM")] ~doc_tags:[Memory] () let get_cooperative = - call ~name:"get_cooperative" ~in_product_since:rel_midnight_ride - ~internal_deprecated_since:rel_tampa + call ~name:"get_cooperative" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Return true if the VM is currently 'co-operative' i.e. is expected \ + to reach a balloon target and actually has done" + ) + ; (Deprecated, rel_tampa, "") + ] ~doc: "Return true if the VM is currently 'co-operative' i.e. is expected to \ reach a balloon target and actually has done" @@ -771,7 +989,15 @@ let get_cooperative = ~allowed_roles:_R_READ_ONLY ~doc_tags:[Memory] () let query_services = - call ~name:"query_services" ~in_product_since:rel_tampa + call ~name:"query_services" + ~lifecycle: + [ + ( Published + , rel_tampa + , "Query the system services advertised by this VM and register them. \ + This can only be applied to a system domain." + ) + ] ~doc: "Query the system services advertised by this VM and register them. This \ can only be applied to a system domain." @@ -782,7 +1008,16 @@ let query_services = (* VM.StartOn *) let start_on = - call ~in_product_since:rel_rio ~name:"start_on" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Start the specified VM on a particular host. This function can \ + only be called with the VM is in the Halted State." + ) + ] + ~name:"start_on" ~doc: "Start the specified VM on a particular host. This function can only be \ called with the VM is in the Halted State." @@ -814,7 +1049,16 @@ let start_on = (* VM.Pause *) let pause = - call ~in_product_since:rel_rio ~name:"pause" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Pause the specified VM. This can only be called when the specified \ + VM is in the Running state." + ) + ] + ~name:"pause" ~doc: "Pause the specified VM. This can only be called when the specified VM \ is in the Running state." @@ -831,7 +1075,16 @@ let pause = (* VM.UnPause *) let unpause = - call ~in_product_since:rel_rio ~name:"unpause" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Resume the specified VM. This can only be called when the specified \ + VM is in the Paused state." + ) + ] + ~name:"unpause" ~doc: "Resume the specified VM. This can only be called when the specified VM \ is in the Paused state." 
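Calls that also carried `~internal_deprecated_since` fold that information into the same list as a second entry, so a message's whole history now lives in one place. Condensed from the `set_memory_target_live` hunk above, again only as an illustrative excerpt of the pattern rather than a standalone compilable snippet:

```
(* Before: publication and deprecation are two unrelated arguments. *)
let set_memory_target_live =
  call ~name:"set_memory_target_live" ~in_product_since:rel_rio
    ~internal_deprecated_since:rel_midnight_ride
    ~doc:"Set the memory target for a running VM"
    (* ... remaining arguments unchanged, omitted here ... *)

(* After: both releases become entries of one ~lifecycle list; the empty
   string means the Deprecated entry adds no further description. *)
let set_memory_target_live =
  call ~name:"set_memory_target_live"
    ~lifecycle:
      [
        (Published, rel_rio, "Set the memory target for a running VM")
      ; (Deprecated, rel_midnight_ride, "")
      ]
    ~doc:"Set the memory target for a running VM"
    (* ... remaining arguments unchanged, omitted here ... *)
```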
@@ -847,7 +1100,17 @@ let unpause = (* VM.CleanShutdown *) let cleanShutdown = - call ~in_product_since:rel_rio ~name:"clean_shutdown" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Attempt to cleanly shutdown the specified VM. (Note: this may not \ + be supported---e.g. if a guest agent is not installed). This can \ + only be called when the specified VM is in the Running state." + ) + ] + ~name:"clean_shutdown" ~doc: "Attempt to cleanly shutdown the specified VM. (Note: this may not be \ supported---e.g. if a guest agent is not installed). This can only be \ @@ -865,7 +1128,17 @@ let cleanShutdown = (* VM.CleanReboot *) let cleanReboot = - call ~in_product_since:rel_rio ~name:"clean_reboot" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Attempt to cleanly shutdown the specified VM (Note: this may not be \ + supported---e.g. if a guest agent is not installed). This can only \ + be called when the specified VM is in the Running state." + ) + ] + ~name:"clean_reboot" ~doc: "Attempt to cleanly shutdown the specified VM (Note: this may not be \ supported---e.g. if a guest agent is not installed). This can only be \ @@ -883,7 +1156,15 @@ let cleanReboot = (* VM.HardShutdown *) let hardShutdown = - call ~in_product_since:rel_rio ~name:"hard_shutdown" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Stop executing the specified VM without attempting a clean shutdown." + ) + ] + ~name:"hard_shutdown" ~doc:"Stop executing the specified VM without attempting a clean shutdown." ~params:[(Ref _vm, "vm", "The VM to destroy")] ~errs: @@ -898,7 +1179,16 @@ let hardShutdown = (* VM.Shutdown *) let shutdown = - call ~in_product_since:rel_clearwater ~name:"shutdown" + call + ~lifecycle: + [ + ( Published + , rel_clearwater + , "Attempts to first clean shutdown a VM and if it should fail then \ + perform a hard shutdown on it." + ) + ] + ~name:"shutdown" ~doc: "Attempts to first clean shutdown a VM and if it should fail then \ perform a hard shutdown on it." @@ -916,7 +1206,18 @@ let shutdown = (* VM.PowerStateReset *) let stateReset = - call ~in_product_since:rel_rio ~name:"power_state_reset" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Reset the power-state of the VM to halted in the database only. \ + (Used to recover from slave failures in pooling scenarios by \ + resetting the power-states of VMs running on dead slaves to \ + halted.) This is a potentially dangerous operation; use with care." + ) + ] + ~name:"power_state_reset" ~doc: "Reset the power-state of the VM to halted in the database only. (Used \ to recover from slave failures in pooling scenarios by resetting the \ @@ -928,7 +1229,16 @@ let stateReset = (* VM.HardReboot *) let hardReboot = - call ~in_product_since:rel_rio ~name:"hard_reboot" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Stop executing the specified VM without attempting a clean shutdown \ + and immediately restart the VM." + ) + ] + ~name:"hard_reboot" ~doc: "Stop executing the specified VM without attempting a clean shutdown and \ immediately restart the VM." @@ -943,17 +1253,34 @@ let hardReboot = ~allowed_roles:_R_VM_OP () let hardReboot_internal = - call ~in_product_since:rel_orlando ~name:"hard_reboot_internal" + call + ~lifecycle: + [ + ( Published + , rel_orlando + , "Internal function which immediately restarts the specified VM." + ) + ; (Deprecated, rel_midnight_ride, "") + ] + ~name:"hard_reboot_internal" ~doc:"Internal function which immediately restarts the specified VM." 
~params:[(Ref _vm, "vm", "The VM to reboot")] - ~pool_internal:true ~hide_from_docs:true - ~internal_deprecated_since:rel_midnight_ride - ~allowed_roles:_R_LOCAL_ROOT_ONLY () + ~pool_internal:true ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY + () (* VM.Hibernate *) let suspend = - call ~in_product_since:rel_rio ~name:"suspend" + call + ~lifecycle: + [ + ( Published + , rel_rio + , "Suspend the specified VM to disk. This can only be called when the \ + specified VM is in the Running state." + ) + ] + ~name:"suspend" ~doc: "Suspend the specified VM to disk. This can only be called when the \ specified VM is in the Running state." @@ -971,16 +1298,32 @@ let suspend = (* VM.clsp -- clone suspended, undocumented API for VMLogix *) let csvm = - call ~name:"csvm" ~in_product_since:rel_rio + call ~name:"csvm" + ~lifecycle: + [ + ( Published + , rel_rio + , "undocumented. internal use only. This call is deprecated." + ) + ; (Deprecated, rel_miami, "") + ] ~doc:"undocumented. internal use only. This call is deprecated." ~params:[(Ref _vm, "vm", "")] ~result:(Ref _vm, "") ~errs:(errnames_of_call clone) ~hide_from_docs:true - ~internal_deprecated_since:rel_miami ~allowed_roles:_R_VM_ADMIN () + ~allowed_roles:_R_VM_ADMIN () (* VM.UnHibernate *) let resume = - call ~name:"resume" ~in_product_since:rel_rio + call ~name:"resume" + ~lifecycle: + [ + ( Published + , rel_rio + , "Awaken the specified VM and resume it. This can only be called \ + when the specified VM is in the Suspended state." + ) + ] ~doc: "Awaken the specified VM and resume it. This can only be called when \ the specified VM is in the Suspended state." @@ -1004,7 +1347,15 @@ let resume = ~allowed_roles:_R_VM_OP () let resume_on = - call ~name:"resume_on" ~in_product_since:rel_rio + call ~name:"resume_on" + ~lifecycle: + [ + ( Published + , rel_rio + , "Awaken the specified VM and resume it on a particular Host. This \ + can only be called when the specified VM is in the Suspended state." + ) + ] ~doc: "Awaken the specified VM and resume it on a particular Host. This can \ only be called when the specified VM is in the Suspended state." @@ -1031,8 +1382,9 @@ let resume_on = () let pool_migrate = - call ~in_oss_since:None ~in_product_since:rel_rio ~name:"pool_migrate" - ~doc:"Migrate a VM to another Host." + call ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Migrate a VM to another Host.")] + ~name:"pool_migrate" ~doc:"Migrate a VM to another Host." ~params: [ (Ref _vm, "vm", "The VM to migrate") @@ -1056,7 +1408,14 @@ let pool_migrate = () let pool_migrate_complete = - call ~in_oss_since:None ~in_product_since:rel_tampa + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_tampa + , "Tell a destination host that migration is complete." + ) + ] ~name:"pool_migrate_complete" ~doc:"Tell a destination host that migration is complete." 
~params: @@ -1070,7 +1429,7 @@ let pool_migrate_complete = () let set_vcpus_number_live = - call ~name:"set_VCPUs_number_live" ~in_product_since:rel_rio + call ~name:"set_VCPUs_number_live" ~lifecycle: [ (Published, rel_rio, "Set the number of VCPUs for a running VM") @@ -1088,7 +1447,13 @@ let set_vcpus_number_live = let set_VCPUs_max = call ~flags:[`Session] ~name:"set_VCPUs_max" - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the maximum number of VCPUs for a halted VM" + ) + ] ~doc:"Set the maximum number of VCPUs for a halted VM" ~params: [ @@ -1099,7 +1464,13 @@ let set_VCPUs_max = let set_VCPUs_at_startup = call ~flags:[`Session] ~name:"set_VCPUs_at_startup" - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the number of startup VCPUs for a halted VM" + ) + ] ~doc:"Set the number of startup VCPUs for a halted VM" ~params: [ @@ -1110,7 +1481,13 @@ let set_VCPUs_at_startup = let set_HVM_shadow_multiplier = call ~flags:[`Session] ~name:"set_HVM_shadow_multiplier" - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Set the shadow memory multiplier on a halted VM" + ) + ] ~doc:"Set the shadow memory multiplier on a halted VM" ~params: [ @@ -1120,7 +1497,9 @@ let set_HVM_shadow_multiplier = ~allowed_roles:_R_VM_POWER_ADMIN () let set_shadow_multiplier_live = - call ~name:"set_shadow_multiplier_live" ~in_product_since:rel_rio + call ~name:"set_shadow_multiplier_live" + ~lifecycle: + [(Published, rel_rio, "Set the shadow memory multiplier on a running VM")] ~doc:"Set the shadow memory multiplier on a running VM" ~params: [ @@ -1130,7 +1509,15 @@ let set_shadow_multiplier_live = ~allowed_roles:_R_VM_POWER_ADMIN () let add_to_VCPUs_params_live = - call ~name:"add_to_VCPUs_params_live" ~in_product_since:rel_rio + call ~name:"add_to_VCPUs_params_live" + ~lifecycle: + [ + ( Published + , rel_rio + , "Add the given key-value pair to VM.VCPUs_params, and apply that \ + value on the running VM" + ) + ] ~doc: "Add the given key-value pair to VM.VCPUs_params, and apply that value \ on the running VM" @@ -1169,7 +1556,16 @@ let set_NVRAM = ~allowed_roles:_R_VM_ADMIN () let send_sysrq = - call ~name:"send_sysrq" ~in_product_since:rel_rio + call ~name:"send_sysrq" + ~lifecycle: + [ + ( Published + , rel_rio + , "Send the given key as a sysrq to this VM. The key is specified as \ + a single character (a String of length 1). This can only be called \ + when the specified VM is in the Running state." + ) + ] ~doc: "Send the given key as a sysrq to this VM. The key is specified as a \ single character (a String of length 1). This can only be called when \ @@ -1179,7 +1575,15 @@ let send_sysrq = ~allowed_roles:_R_POOL_ADMIN () let send_trigger = - call ~name:"send_trigger" ~in_product_since:rel_rio + call ~name:"send_trigger" + ~lifecycle: + [ + ( Published + , rel_rio + , "Send the named trigger to this VM. This can only be called when \ + the specified VM is in the Running state." + ) + ] ~doc: "Send the named trigger to this VM. This can only be called when the \ specified VM is in the Running state." @@ -1189,7 +1593,15 @@ let send_trigger = ~allowed_roles:_R_POOL_ADMIN () let migrate_send = - call ~name:"migrate_send" ~in_product_since:rel_tampa + call ~name:"migrate_send" + ~lifecycle: + [ + ( Published + , rel_tampa + , "Migrate the VM to another host. This can only be called when the \ + specified VM is in the Running state." 
+ ) + ] ~doc: "Migrate the VM to another host. This can only be called when the \ specified VM is in the Running state." @@ -1251,7 +1663,14 @@ let migrate_send = ~allowed_roles:_R_VM_POWER_ADMIN () let assert_can_migrate = - call ~name:"assert_can_migrate" ~in_product_since:rel_tampa + call ~name:"assert_can_migrate" + ~lifecycle: + [ + ( Published + , rel_tampa + , "Assert whether a VM can be migrated to the specified destination." + ) + ] ~doc:"Assert whether a VM can be migrated to the specified destination." ~versioned_params: [ @@ -1340,19 +1759,33 @@ let assert_can_migrate_sender = ~allowed_roles:_R_VM_POWER_ADMIN ~hide_from_docs:true () let s3_suspend = - call ~name:"s3_suspend" ~in_product_since:rel_midnight_ride + call ~name:"s3_suspend" + ~lifecycle: + [(Published, rel_midnight_ride, "Try to put the VM into ACPI S3 state")] ~doc:"Try to put the VM into ACPI S3 state" ~params:[(Ref _vm, "vm", "The VM")] ~hide_from_docs:true ~allowed_roles:_R_VM_OP () let s3_resume = - call ~name:"s3_resume" ~in_product_since:rel_midnight_ride + call ~name:"s3_resume" + ~lifecycle: + [ + (Published, rel_midnight_ride, "Try to resume the VM from ACPI S3 state") + ] ~doc:"Try to resume the VM from ACPI S3 state" ~params:[(Ref _vm, "vm", "The VM")] ~hide_from_docs:true ~allowed_roles:_R_VM_OP () let create_new_blob = - call ~name:"create_new_blob" ~in_product_since:rel_orlando + call ~name:"create_new_blob" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Create a placeholder for a named binary blob of data that is \ + associated with this VM" + ) + ] ~doc: "Create a placeholder for a named binary blob of data that is associated \ with this VM" @@ -1394,7 +1827,22 @@ let create_new_blob = ~allowed_roles:_R_VM_POWER_ADMIN () let set_bios_strings = - call ~name:"set_bios_strings" ~in_product_since:rel_inverness + call ~name:"set_bios_strings" + ~lifecycle: + [ + ( Published + , rel_inverness + , "Set custom BIOS strings to this VM. VM will be given a default set \ + of BIOS strings, only some of which can be overridden by the \ + supplied values. Allowed keys are: 'bios-vendor', 'bios-version', \ + 'system-manufacturer', 'system-product-name', 'system-version', \ + 'system-serial-number', 'enclosure-asset-tag', \ + 'baseboard-manufacturer', 'baseboard-product-name', \ + 'baseboard-version', 'baseboard-serial-number', \ + 'baseboard-asset-tag', 'baseboard-location-in-chassis', \ + 'enclosure-asset-tag'" + ) + ] ~doc: "Set custom BIOS strings to this VM. 
VM will be given a default set of \ BIOS strings, only some of which can be overridden by the supplied \ @@ -1417,7 +1865,14 @@ let set_bios_strings = () let copy_bios_strings = - call ~name:"copy_bios_strings" ~in_product_since:rel_midnight_ride + call ~name:"copy_bios_strings" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Copy the BIOS strings from the given host to this VM" + ) + ] ~doc:"Copy the BIOS strings from the given host to this VM" ~params: [ @@ -1434,13 +1889,15 @@ let set_protection_policy = let set_snapshot_schedule = call ~name:"set_snapshot_schedule" ~in_oss_since:None - ~in_product_since:rel_falcon + ~lifecycle: + [(Published, rel_falcon, "Set the value of the snapshot schedule field")] ~doc:"Set the value of the snapshot schedule field" ~params:[(Ref _vm, "self", "The VM"); (Ref _vmss, "value", "The value")] ~flags:[`Session] ~allowed_roles:_R_POOL_OP () let set_start_delay = - call ~name:"set_start_delay" ~in_product_since:rel_boston + call ~name:"set_start_delay" + ~lifecycle:[(Published, rel_boston, "Set this VM's start delay in seconds")] ~doc:"Set this VM's start delay in seconds" ~params: [ @@ -1450,7 +1907,9 @@ let set_start_delay = ~allowed_roles:_R_POOL_OP () let set_shutdown_delay = - call ~name:"set_shutdown_delay" ~in_product_since:rel_boston + call ~name:"set_shutdown_delay" + ~lifecycle: + [(Published, rel_boston, "Set this VM's shutdown delay in seconds")] ~doc:"Set this VM's shutdown delay in seconds" ~params: [ @@ -1460,14 +1919,23 @@ let set_shutdown_delay = ~allowed_roles:_R_POOL_OP () let set_order = - call ~name:"set_order" ~in_product_since:rel_boston + call ~name:"set_order" + ~lifecycle:[(Published, rel_boston, "Set this VM's boot order")] ~doc:"Set this VM's boot order" ~params: [(Ref _vm, "self", "The VM"); (Int, "value", "This VM's boot order")] ~allowed_roles:_R_POOL_OP () let set_suspend_VDI = - call ~name:"set_suspend_VDI" ~in_product_since:rel_boston + call ~name:"set_suspend_VDI" + ~lifecycle: + [ + ( Published + , rel_boston + , "Set this VM's suspend VDI, which must be indentical to its current \ + one" + ) + ] ~doc: "Set this VM's suspend VDI, which must be indentical to its current one" ~params: @@ -1475,7 +1943,14 @@ let set_suspend_VDI = ~allowed_roles:_R_POOL_OP () let assert_can_be_recovered = - call ~name:"assert_can_be_recovered" ~in_product_since:rel_boston + call ~name:"assert_can_be_recovered" + ~lifecycle: + [ + ( Published + , rel_boston + , "Assert whether all SRs required to recover this VM are available." + ) + ] ~doc:"Assert whether all SRs required to recover this VM are available." 
~params: [ @@ -1489,7 +1964,14 @@ let assert_can_be_recovered = ~allowed_roles:_R_READ_ONLY () let get_SRs_required_for_recovery = - call ~name:"get_SRs_required_for_recovery" ~in_product_since:rel_creedence + call ~name:"get_SRs_required_for_recovery" + ~lifecycle: + [ + ( Published + , rel_creedence + , "List all the SR's that are required for the VM to be recovered" + ) + ] ~doc:"List all the SR's that are required for the VM to be recovered" ~params: [ @@ -1503,7 +1985,9 @@ let get_SRs_required_for_recovery = ~errs:[] ~allowed_roles:_R_READ_ONLY () let recover = - call ~name:"recover" ~in_product_since:rel_boston ~doc:"Recover the VM" + call ~name:"recover" + ~lifecycle:[(Published, rel_boston, "Recover the VM")] + ~doc:"Recover the VM" ~params: [ (Ref _vm, "self", "The VM to recover") @@ -1519,7 +2003,8 @@ let recover = ~allowed_roles:_R_READ_ONLY () let set_appliance = - call ~name:"set_appliance" ~in_product_since:rel_boston + call ~name:"set_appliance" + ~lifecycle:[(Published, rel_boston, "Assign this VM to an appliance.")] ~doc:"Assign this VM to an appliance." ~params: [ @@ -1542,7 +2027,8 @@ let set_groups = ~allowed_roles:_R_VM_ADMIN () let call_plugin = - call ~name:"call_plugin" ~in_product_since:rel_cream + call ~name:"call_plugin" + ~lifecycle:[(Published, rel_cream, "Call an API plugin on this vm")] ~doc:"Call an API plugin on this vm" ~params: [ @@ -1555,7 +2041,18 @@ let call_plugin = ~allowed_roles:_R_VM_OP () let set_has_vendor_device = - call ~name:"set_has_vendor_device" ~in_product_since:rel_dundee + call ~name:"set_has_vendor_device" + ~lifecycle: + [ + ( Published + , rel_dundee + , "Controls whether, when the VM starts in HVM mode, its virtual \ + hardware will include the emulated PCI device for which drivers may \ + be available through Windows Update. Usually this should never be \ + changed on a VM on which Windows has been installed: changing it on \ + such a VM is likely to lead to a crash on next start." 
+ ) + ] ~doc: "Controls whether, when the VM starts in HVM mode, its virtual hardware \ will include the emulated PCI device for which drivers may be available \ @@ -1570,7 +2067,8 @@ let set_has_vendor_device = ~allowed_roles:_R_VM_ADMIN ~doc_tags:[Windows] () let import = - call ~name:"import" ~in_product_since:rel_dundee + call ~name:"import" + ~lifecycle:[(Published, rel_dundee, "Import an XVA from a URI")] ~doc:"Import an XVA from a URI" ~params: [ @@ -1647,7 +2145,15 @@ let operations = let set_blocked_operations = call ~name:"set_blocked_operations" - ~in_product_since:rel_orlando (* but updated 2024 *) + ~lifecycle: + [ + ( Published + , rel_orlando + , "Update list of operations which have been explicitly blocked and an \ + error code" + ) + ] + (* but updated 2024 *) ~doc: "Update list of operations which have been explicitly blocked and an \ error code" @@ -1660,7 +2166,15 @@ let set_blocked_operations = let add_to_blocked_operations = call ~name:"add_to_blocked_operations" - ~in_product_since:rel_orlando (* but updated 2024 *) + ~lifecycle: + [ + ( Published + , rel_orlando + , "Update list of operations which have been explicitly blocked and an \ + error code" + ) + ] + (* but updated 2024 *) ~doc: "Update list of operations which have been explicitly blocked and an \ error code" @@ -1674,7 +2188,15 @@ let add_to_blocked_operations = let remove_from_blocked_operations = call ~name:"remove_from_blocked_operations" - ~in_product_since:rel_orlando (* but updated 2024 *) + ~lifecycle: + [ + ( Published + , rel_orlando + , "Update list of operations which have been explicitly blocked and an \ + error code" + ) + ] + (* but updated 2024 *) ~doc: "Update list of operations which have been explicitly blocked and an \ error code" @@ -1683,7 +2205,16 @@ let remove_from_blocked_operations = ~allowed_roles:_R_VM_ADMIN () let assert_operation_valid = - call ~in_oss_since:None ~in_product_since:rel_rio + call ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Check to see whether this operation is acceptable in the current \ + state of the system, raising an error if the operation is invalid \ + for some reason" + ) + ] ~name:"assert_operation_valid" ~doc: "Check to see whether this operation is acceptable in the current state \ @@ -1697,7 +2228,9 @@ let assert_operation_valid = ~allowed_roles:_R_READ_ONLY () let update_allowed_operations = - call ~in_oss_since:None ~in_product_since:rel_rio + call ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Recomputes the list of acceptable operations")] ~name:"update_allowed_operations" ~doc:"Recomputes the list of acceptable operations" ~params:[(Ref _vm, _self, "reference to the object")] From ef7c4d3c9eb7a0fe0e36481bd3de6367495c672e Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 23 Sep 2024 14:03:01 +0100 Subject: [PATCH 036/141] idl: Remove long-unused code Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 37 +------------------------------------ ocaml/idl/datamodel_host.ml | 11 ----------- ocaml/idl/datamodel_vm.ml | 8 -------- 3 files changed, 1 insertion(+), 55 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index b72087fb432..6677c88009c 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -526,8 +526,7 @@ module Task = struct ; ("XenCenterUUID", _R_VM_OP) ; ("XenCenterMeddlingActionTitle", _R_VM_OP) ] - ; (* field ~ty:(Set(Ref _alert)) ~in_product_since:rel_miami ~qualifier:DynamicRO "alerts" "all alerts related to this task"; *) - field 
~qualifier:DynamicRO ~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~ty:(Ref _task) "subtask_of" "Ref pointing to the task this is a substask of." ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando @@ -4077,21 +4076,6 @@ module LVHD = struct () end -(* --- rws: removed this after talking to Andy and Julian - let filesystem = - { name = _filesystem; description = "An on-disk filesystem"; - messages = []; - contents = - field "uuid" "globally-unique ID" :: - let field ?(ty=Int) = field ~qualifier:DynamicRO ~ty in - [ field "block_size" "block size"; - field "total_blocks" "total blocks on disk"; - field "available_blocks" "blocks available for allocation"; - field "used_blocks" "blocks already in use"; - field "percentage_free" "Percentage of free space left in filesystem"; - field ~ty:String "type" "filesystem type" ] } -*) - module Vdi_nbd_server_info = struct let t = let lifecycle = [(Published, rel_inverness, "")] in @@ -7510,25 +7494,6 @@ module Secret = struct () end -(* - -let alert = - create_obj ~in_product_since:rel_miami ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_alert ~descr:"Notification information" - ~gen_events:true - ~doccomments:[] - ~messages: [] - ~contents: - [ - uid ~in_oss_since:None _alert; - field ~in_oss_since:None ~qualifier:StaticRO ~ty:String "message" "description of the alert"; - field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "params" "parameters of the alert"; - field ~in_oss_since:None ~qualifier:StaticRO ~ty:alert_level "level" "level of importance (info/warning/error/critical)"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "system" "system task"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Ref _task) "task" "task related to this alert (null reference if there's no task associated)"; - ] - () -*) - (** network sriov **) module Network_sriov = struct let lifecycle = [(Published, rel_kolkata, "")] diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index 76508745467..669ffeab248 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -209,17 +209,6 @@ let ha_wait_for_shutdown_via_statefile = ~pool_internal:true ~hide_from_docs:true ~allowed_roles:_R_LOCAL_ROOT_ONLY () -(* -let host_query_ha = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"query_ha" - ~doc:"Return the local HA configuration as seen by this host" - ~params:[] - ~custom_marshaller:true - ~pool_internal:true - ~hide_from_docs:true - () -*) let request_backup = call ~flags:[`Session] ~name:"request_backup" ~lifecycle: diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index 8a5120ca679..69bae5f43d9 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -83,14 +83,6 @@ let guest_memory = ~doc_tags:[Memory] ] -(* -let power_behaviour = - Enum ("power_behaviour", [ "destroy", "destroy the VM state"; - "restart", "automatically restart the VM"; - "preserve", "leave VM running"; - "rename_restart", "leave VM running and restart a new one" ]) -*) - (** Action to take on guest reboot/power off/sleep etc *) let on_crash_behaviour = Enum From b1ac9f2da25de5c4fbde5876a995948c5bbae709 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 23 Sep 2024 14:13:23 +0100 Subject: [PATCH 037/141] idl: Remove in_product_since and internal_deprecated_since parameters for create_obj These were already being replaced with 
lifecycle equivalents in datamodel_common, search and replace them all and remove parameters as such. Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 294 ++++++++++++++++++++--------- ocaml/idl/datamodel_common.ml | 21 +-- ocaml/idl/datamodel_diagnostics.ml | 9 +- ocaml/idl/datamodel_host.ml | 9 +- ocaml/idl/datamodel_pool.ml | 9 +- ocaml/idl/datamodel_vm.ml | 4 +- 6 files changed, 224 insertions(+), 122 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 6677c88009c..88341f3f6b2 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -235,9 +235,11 @@ module Session = struct ~in_oss_since:None ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_session - ~descr:"A session" ~gen_events:false ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A session")] + ~in_oss_since:oss_since_303 ~persist:PersistNothing + ~gen_constructor_destructor:false ~name:_session ~descr:"A session" + ~gen_events:false ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages: [ @@ -447,8 +449,10 @@ module Task = struct Enum ("task_allowed_operations", List.map operation_enum [cancel; destroy]) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_task + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A long-running asynchronous task")] + ~in_oss_since:oss_since_303 ~persist:PersistNothing + ~gen_constructor_destructor:false ~name:_task ~descr:"A long-running asynchronous task" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: @@ -570,7 +574,7 @@ let iobandwidth = module User = struct let t = (* DEPRECATED in favor of subject *) - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 + create_obj ~in_db:true ~in_oss_since:oss_since_303 ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_user ~descr:"A user of the system" ~gen_events:false ~lifecycle: @@ -632,9 +636,10 @@ module Host_crashdump = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false - ~name:_host_crashdump ~gen_events:true + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "Represents a host crash dump")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_host_crashdump ~gen_events:true ~descr:"Represents a host crash dump" ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[destroy; upload] ~contents: @@ -816,9 +821,11 @@ module Pool_update = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_ely ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false - ~gen_events:true ~name:_pool_update + create_obj ~in_db:true + ~lifecycle: + [(Published, rel_ely, "Pool-wide updates to the host software")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~gen_events:true ~name:_pool_update ~descr:"Pool-wide updates to the host software" ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: @@ -1018,8 +1025,10 @@ module Pool_patch = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None - 
~internal_deprecated_since:(Some rel_ely) ~persist:PersistEverything + create_obj ~in_db:true + ~lifecycle: + [(Published, rel_miami, "Pool-wide patches"); (Deprecated, rel_ely, "")] + ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~gen_events:true ~name:_pool_patch ~descr:"Pool-wide patches" ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP @@ -1094,8 +1103,13 @@ module Host_patch = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None - ~internal_deprecated_since:(Some rel_ely) ~persist:PersistEverything + create_obj ~in_db:true + ~lifecycle: + [ + (Published, rel_rio, "Represents a patch stored on a server") + ; (Deprecated, rel_ely, "") + ] + ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host_patch ~gen_events:true ~descr:"Represents a patch stored on a server" ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[destroy; apply] @@ -1145,11 +1159,12 @@ module Host_metrics = struct ] let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false - ~name:_host_metrics ~descr:"The metrics associated with a host" - ~gen_events:true ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP ~messages:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "The metrics associated with a host")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_host_metrics + ~descr:"The metrics associated with a host" ~gen_events:true + ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[] ~contents: [ uid _host_metrics @@ -1169,7 +1184,7 @@ end module Host_cpu = struct let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 + create_obj ~in_db:true ~in_oss_since:oss_since_303 ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_hostcpu ~descr:"A physical CPU" ~gen_events:true ~lifecycle: @@ -1500,9 +1515,11 @@ module Network = struct (** A virtual network *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_network - ~descr:"A virtual network" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A virtual network")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_network ~descr:"A virtual network" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_VM_ADMIN (* vm admins can create/destroy networks without PIFs *) ~doc_tags:[Networking] @@ -2074,8 +2091,17 @@ module PIF = struct ) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_pif + create_obj ~in_db:true + ~lifecycle: + [ + ( Published + , rel_rio + , "A physical network interface (note separate VLANs are represented \ + as several PIFs)" + ) + ] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_pif ~descr: "A physical network interface (note separate VLANs are represented as \ several PIFs)" @@ -2256,9 +2282,16 @@ end module PIF_metrics = struct let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false - ~name:_pif_metrics + 
create_obj ~in_db:true + ~lifecycle: + [ + ( Published + , rel_rio + , "The metrics associated with a physical network interface" + ) + ] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_pif_metrics ~descr:"The metrics associated with a physical network interface" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~doc_tags:[Networking] @@ -2379,8 +2412,17 @@ module Bond = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_bond + create_obj ~in_db:true + ~lifecycle: + [ + ( Published + , rel_miami + , "A Network bond that combines physical network interfaces, also \ + known as link aggregation" + ) + ] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_bond ~descr: "A Network bond that combines physical network interfaces, also known \ as link aggregation" @@ -2496,9 +2538,11 @@ module VLAN = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_vlan - ~descr:"A VLAN mux/demux" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_miami, "A VLAN mux/demux")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_vlan ~descr:"A VLAN mux/demux" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~doc_tags:[Networking] ~messages:[pool_introduce; create; destroy] ~contents: @@ -2661,8 +2705,16 @@ module PBD = struct ~doc:"Sets the PBD's device_config field" ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_pbd + create_obj ~in_db:true + ~lifecycle: + [ + ( Published + , rel_rio + , "The physical block devices through which hosts access SRs" + ) + ] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_pbd ~descr:"The physical block devices through which hosts access SRs" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP @@ -3043,8 +3095,10 @@ module VIF = struct (** A virtual network interface *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vif + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A virtual network interface")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_vif ~descr:"A virtual network interface" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_VM_ADMIN ~doc_tags:[Networking] ~messages: @@ -3177,11 +3231,12 @@ end module Data_source = struct let t = - create_obj ~in_db:false ~in_product_since:rel_orlando ~in_oss_since:None - ~persist:PersistNothing ~gen_constructor_destructor:false - ~name:_data_source ~descr:"Data sources for logging in RRDs" - ~gen_events:false ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[] + create_obj ~in_db:false + ~lifecycle:[(Published, rel_orlando, "Data sources for logging in RRDs")] + ~in_oss_since:None ~persist:PersistNothing + ~gen_constructor_destructor:false ~name:_data_source + ~descr:"Data sources for logging in RRDs" ~gen_events:false + ~doccomments:[] 
~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[] ~contents: [ namespace ~name:"name" ~contents:(names oss_since_303 DynamicRO) () @@ -3856,9 +3911,11 @@ module SR = struct (** A storage repository. Note we overide default create/destroy methods with our own here... *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_sr - ~descr:"A storage repository" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A storage repository")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_sr ~descr:"A storage repository" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ @@ -3972,8 +4029,10 @@ module SM = struct ~doc:"Gets the SM's driver_filename field" () let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_sm + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A storage manager plugin")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_sm ~descr:"A storage manager plugin" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[] ~contents: @@ -4067,8 +4126,10 @@ module LVHD = struct () let t = - create_obj ~in_db:true ~in_product_since:rel_dundee ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_lvhd + create_obj ~in_db:true + ~lifecycle:[(Published, rel_dundee, "LVHD SR specific operations")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_lvhd ~descr:"LVHD SR specific operations" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[enable_thin_provisioning] @@ -5077,9 +5138,11 @@ module VDI = struct (** A virtual disk *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vdi - ~descr:"A virtual disk image" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A virtual disk image")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_vdi ~descr:"A virtual disk image" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_VM_ADMIN ~messages: [ @@ -5459,8 +5522,10 @@ module VBD = struct (** A virtual disk interface *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vbd + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A virtual block device")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_vbd ~descr:"A virtual block device" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_VM_ADMIN ~messages: @@ -5591,8 +5656,12 @@ module Crashdump = struct (** A crashdump for a particular VM, stored in a particular VDI *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None - ~internal_deprecated_since:(Some rel_inverness) ~persist:PersistEverything + create_obj ~in_db:true + ~lifecycle: + [ + (Published, rel_rio, "A VM crashdump"); (Deprecated, rel_inverness, "") + ] + ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_crashdump 
~descr:"A VM crashdump" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[destroy] @@ -5702,8 +5771,13 @@ module Auth = struct ~allowed_roles:_R_READ_ONLY () let t = - create_obj ~in_db:false ~in_product_since:rel_george ~in_oss_since:None - ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_auth + create_obj ~in_db:false + ~lifecycle: + [ + (Published, rel_george, "Management of remote authentication services") + ] + ~in_oss_since:None ~persist:PersistNothing + ~gen_constructor_destructor:false ~name:_auth ~descr:"Management of remote authentication services" ~gen_events:false ~doccomments:[] ~messages_default_allowed_roles:_R_READ_ONLY ~messages: @@ -5779,8 +5853,11 @@ module Subject = struct (* a subject is a user/group that can log in xapi *) let t = - create_obj ~in_db:true ~in_product_since:rel_george ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_subject + create_obj ~in_db:true + ~lifecycle: + [(Published, rel_george, "A user or group that can log in xapi")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_subject ~descr:"A user or group that can log in xapi" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[add_to_roles; remove_from_roles; get_permissions_name_label] @@ -5872,9 +5949,16 @@ module Role = struct (* - basic role: is the 1x1 mapping to each XAPI/HTTP call being protected, a leaf in the tree of roles *) (* - intermediate role: an intermediate node in the recursive tree of roles, usually not meant to the end-user *) let t = - create_obj ~in_db:true ~in_product_since:rel_midnight_ride - ~in_oss_since:None ~internal_deprecated_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_role + create_obj ~in_db:true + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "A set of permissions associated with a subject" + ) + ] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_role ~descr:"A set of permissions associated with a subject" ~gen_events:true ~force_custom_actions:(Some StaticRO) (* force custom actions for getters *) @@ -5927,9 +6011,11 @@ module Console = struct (** A virtual console device *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_console - ~descr:"A console" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A console")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_console ~descr:"A console" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[] ~contents: [ @@ -5986,10 +6072,11 @@ module VM_metrics = struct ] let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false - ~name:_vm_metrics ~descr:"The metrics associated with a VM" - ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "The metrics associated with a VM")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_vm_metrics + ~descr:"The metrics associated with a VM" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[] ~contents: [ @@ -6048,9 +6135,17 @@ module VM_guest_metrics = struct (* Some of this stuff 
needs to persist (like PV drivers vsns etc.) so we know about what's likely to be in the VM even when it's off. Other things don't need to persist, so we specify these on a per-field basis *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false - ~name:_vm_guest_metrics + create_obj ~in_db:true + ~lifecycle: + [ + ( Published + , rel_rio + , "The metrics reported by the guest (as opposed to inferred from \ + outside)" + ) + ] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_vm_guest_metrics ~descr: "The metrics reported by the guest (as opposed to inferred from \ outside)" @@ -6728,11 +6823,11 @@ module VMSS = struct () let t = - create_obj ~in_db:true ~in_oss_since:None ~internal_deprecated_since:None - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vmss - ~descr:"VM Snapshot Schedule" ~gen_events:true - ~in_product_since:rel_falcon ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP + create_obj ~in_db:true ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_vmss ~descr:"VM Snapshot Schedule" + ~gen_events:true + ~lifecycle:[(Published, rel_falcon, "VM Snapshot Schedule")] + ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ snapshot_now @@ -6917,9 +7012,11 @@ module VM_appliance = struct ~doc:"Recover the VM appliance" ~allowed_roles:_R_READ_ONLY () let t = - create_obj ~in_db:true ~in_product_since:rel_boston ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:true - ~name:_vm_appliance ~descr:"VM appliance" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_boston, "VM appliance")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_vm_appliance ~descr:"VM appliance" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ @@ -6988,9 +7085,11 @@ module DR_task = struct ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_boston ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false - ~name:_dr_task ~descr:"DR task" ~gen_events:true ~doccomments:[] + create_obj ~in_db:true + ~lifecycle:[(Published, rel_boston, "DR task")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_dr_task ~descr:"DR task" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[create; destroy] ~contents: [ @@ -7262,8 +7361,10 @@ module Blob = struct ~flags:[`Session] ~allowed_roles:_R_POOL_OP () let t = - create_obj ~in_db:true ~in_product_since:rel_orlando ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_blob + create_obj ~in_db:true + ~lifecycle:[(Published, rel_orlando, "A placeholder for a binary blob")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_blob ~descr:"A placeholder for a binary blob" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[create; destroy] ~contents: @@ -7401,10 +7502,18 @@ module Message = struct ~allowed_roles:_R_READ_ONLY () let t = - create_obj ~in_db:false ~in_product_since:rel_orlando ~in_oss_since:None - ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_message + create_obj ~in_db:false + ~lifecycle: + [ + ( Published + , rel_orlando + , "An message for the 
attention of the administrator" + ) + ] + ~in_oss_since:None ~persist:PersistNothing + ~gen_constructor_destructor:false ~name:_message ~descr:"An message for the attention of the administrator" - ~gen_events:true ~doccomments:[] ~internal_deprecated_since:None + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ @@ -7477,7 +7586,8 @@ module Secret = struct let t = create_obj ~descr:"A secret" ~doccomments:[] ~gen_constructor_destructor:true ~gen_events:false ~in_db:true - ~in_oss_since:None ~in_product_since:rel_midnight_ride + ~in_oss_since:None + ~lifecycle:[(Published, rel_midnight_ride, "A secret")] ~messages:[introduce] ~messages_default_allowed_roles:_R_POOL_OP ~implicit_messages_allowed_roles:_R_POOL_OP ~name:_secret ~persist:PersistEverything diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index 07ea70f90a4..3ee1bcebd99 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -792,8 +792,7 @@ let default_field_writer_roles = _R_POOL_ADMIN (* by default, only root can write to them *) (** Create an object and map the object name into the messages *) -let create_obj ?lifecycle ~in_oss_since ?in_product_since - ?(internal_deprecated_since = None) ~gen_constructor_destructor ~gen_events +let create_obj ?lifecycle ~in_oss_since ~gen_constructor_destructor ~gen_events ~persist ~name ~descr ~doccomments ~contents ~messages ~in_db ?(contents_default_reader_roles = default_field_reader_roles) ?(contents_default_writer_roles = None) @@ -844,26 +843,10 @@ let create_obj ?lifecycle ~in_oss_since ?in_product_since ) contents in - if lifecycle = None && in_product_since = None then - failwith ("Lifecycle for class '" ^ name ^ "' not specified") ; let lifecycle = match lifecycle with | None -> - let published = - match in_product_since with - | None -> - [] - | Some rel -> - [(Published, rel, descr)] - in - let deprecated = - match internal_deprecated_since with - | None -> - [] - | Some rel -> - [(Deprecated, rel, "")] - in - published @ deprecated + failwith ("Lifecycle for class '" ^ name ^ "' not specified") | Some l -> l in diff --git a/ocaml/idl/datamodel_diagnostics.ml b/ocaml/idl/datamodel_diagnostics.ml index b81d12ca905..2abcfcdc7ba 100644 --- a/ocaml/idl/datamodel_diagnostics.ml +++ b/ocaml/idl/datamodel_diagnostics.ml @@ -58,7 +58,14 @@ let network_stats = () let t = - create_obj ~in_db:false ~in_product_since:Datamodel_types.rel_stockholm + create_obj ~in_db:false + ~lifecycle: + [ + ( Published + , Datamodel_types.rel_stockholm + , "A set of functions for diagnostic purpose" + ) + ] ~in_oss_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_diagnostics ~descr:"A set of functions for diagnostic purpose" ~gen_events:false ~doccomments:[] diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index 669ffeab248..c6648b0b9b4 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -2314,10 +2314,11 @@ let latest_synced_updates_applied_state = (** Hosts *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host - ~descr:"A physical host" ~gen_events:true ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "A physical host")] + ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_host ~descr:"A physical host" + 
~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ disable diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml index d13858363ab..f9b0de3d65c 100644 --- a/ocaml/idl/datamodel_pool.ml +++ b/ocaml/idl/datamodel_pool.ml @@ -1509,10 +1509,11 @@ let get_guest_secureboot_readiness = (** A pool class *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None - ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_pool - ~descr:"Pool-wide information" ~gen_events:true ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP + create_obj ~in_db:true + ~lifecycle:[(Published, rel_rio, "Pool-wide information")] + ~in_oss_since:None ~persist:PersistEverything + ~gen_constructor_destructor:false ~name:_pool ~descr:"Pool-wide information" + ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ join diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index 69bae5f43d9..acbf67df3f8 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -2353,8 +2353,8 @@ let get_secureboot_readiness = (** VM (or 'guest') configuration: *) let t = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 - ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vm + create_obj ~in_db:true ~in_oss_since:oss_since_303 ~persist:PersistEverything + ~gen_constructor_destructor:true ~name:_vm ~descr:"A virtual machine (or 'guest')." ~gen_events:true ~doccomments: [ From 3c857e14835ed216698cc4fe783b856e98bc9310 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 24 Sep 2024 08:31:57 +0100 Subject: [PATCH 038/141] idl: Make lifecycle explicit for callers of 'names' This is the only commit in the series that changes the schematest hash, because previous implementation of names created two fields with different lifecycle descriptions, and when the callers are changed to provide an explicit lifecycle argument, the description is the same for both fields. It's been chosen to provide an empty description instead (since any actual description would be duplicated for both fields, further confusing matters), except for one case where it follows the rest of the class with 'lifecycle:removed'. 
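For illustration, a minimal sketch of the change at a typical call site (namespace, names and rel_rio are the existing datamodel DSL helpers; arguments are abbreviated, and the real call sites are in the diff below):

```ocaml
(* Before: the names helper synthesised the lifecycle itself, giving the
   two generated name fields different descriptions. *)
namespace ~name:"name" ~contents:(names None RW) ()

(* After: the caller passes the lifecycle explicitly, with an empty
   description so the same text is not duplicated across both fields. *)
namespace ~name:"name"
  ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")])
  ()
```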
Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 72 +++++++++++++++++++++++++------ ocaml/idl/datamodel_host.ml | 4 +- ocaml/idl/datamodel_observer.ml | 4 +- ocaml/idl/datamodel_repository.ml | 7 ++- ocaml/idl/datamodel_vm.ml | 5 ++- ocaml/idl/datamodel_vm_group.ml | 4 +- ocaml/idl/schematest.ml | 2 +- 7 files changed, 78 insertions(+), 20 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 88341f3f6b2..1e813b17d1f 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -468,7 +468,13 @@ module Task = struct ~contents: ([ uid _task - ; namespace ~name:"name" ~contents:(names oss_since_303 DynamicRO) () + ; namespace ~name:"name" + ~contents: + (names + ~lifecycle:[(Published, rel_rio, "")] + oss_since_303 DynamicRO + ) + () ] @ allowed_and_current_operations task_allowed_operations @ [ @@ -843,7 +849,9 @@ module Pool_update = struct ~contents: [ uid ~in_oss_since:None _pool_update - ; namespace ~name:"name" ~contents:(names None StaticRO) () + ; namespace ~name:"name" + ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~in_product_since:rel_ely ~default_value:(Some (VString "")) ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" "Update version number" @@ -1037,7 +1045,9 @@ module Pool_patch = struct ~contents: [ uid ~in_oss_since:None _pool_patch - ; namespace ~name:"name" ~contents:(names None StaticRO) () + ; namespace ~name:"name" + ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~in_product_since:rel_miami ~default_value:(Some (VString "")) ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" "Patch version number" @@ -1116,7 +1126,9 @@ module Host_patch = struct ~contents: [ uid ~in_oss_since:None _host_patch - ; namespace ~name:"name" ~contents:(names None StaticRO) () + ; namespace ~name:"name" + ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:String "version" "Patch version number" ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO @@ -1538,7 +1550,11 @@ module Network = struct ([ uid _network ; namespace ~name:"name" - ~contents:(names ~writer_roles:_R_POOL_OP oss_since_303 RW) + ~contents: + (names ~writer_roles:_R_POOL_OP + ~lifecycle:[(Published, rel_rio, "")] + oss_since_303 RW + ) () ] @ allowed_and_current_operations ~writer_roles:_R_POOL_OP operations @@ -3239,7 +3255,12 @@ module Data_source = struct ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[] ~contents: [ - namespace ~name:"name" ~contents:(names oss_since_303 DynamicRO) () + namespace ~name:"name" + ~contents: + (names oss_since_303 DynamicRO + ~lifecycle:[(Published, rel_rio, "")] + ) + () ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio "enabled" "true if the data source is being logged" ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio @@ -3949,7 +3970,12 @@ module SR = struct ~contents: ([ uid _sr - ; namespace ~name:"name" ~contents:(names oss_since_303 StaticRO) () + ; namespace ~name:"name" + ~contents: + (names oss_since_303 StaticRO + ~lifecycle:[(Published, rel_rio, "")] + ) + () ] @ allowed_and_current_operations operations @ [ @@ -4038,7 +4064,10 @@ module SM = struct ~contents: [ uid _sm - ; namespace ~name:"name" ~contents:(names None DynamicRO) () + ; namespace ~name:"name" + ~contents: + (names None DynamicRO ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~in_oss_since:None 
~in_product_since:rel_rio ~qualifier:DynamicRO "type" "SR.type" ; field ~in_oss_since:None ~in_product_since:rel_rio @@ -5187,7 +5216,12 @@ module VDI = struct ~contents: ([ uid _vdi - ; namespace ~name:"name" ~contents:(names oss_since_303 StaticRO) () + ; namespace ~name:"name" + ~contents: + (names oss_since_303 StaticRO + ~lifecycle:[(Published, rel_rio, "")] + ) + () ] @ allowed_and_current_operations operations @ [ @@ -6633,7 +6667,7 @@ module VMPP = struct ~contents: [ uid ~lifecycle:removed _vmpp - ; namespace ~name:"name" ~contents:(names None RW) () + ; namespace ~name:"name" ~contents:(names None RW ~lifecycle:removed) () ; field ~lifecycle:removed ~qualifier:RW ~ty:Bool "is_policy_enabled" "enable or disable this policy" ~default_value:(Some (VBool true)) ; field ~lifecycle:removed ~qualifier:RW ~ty:backup_type "backup_type" @@ -6842,7 +6876,9 @@ module VMSS = struct ~contents: [ uid _vmss - ; namespace ~name:"name" ~contents:(names None RW) () + ; namespace ~name:"name" + ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~qualifier:RW ~ty:Bool ~in_product_since:rel_rio "enabled" "enable or disable this snapshot schedule" ~default_value:(Some (VBool true)) @@ -7030,7 +7066,10 @@ module VM_appliance = struct ] ~contents: ([ - uid _vm_appliance; namespace ~name:"name" ~contents:(names None RW) () + uid _vm_appliance + ; namespace ~name:"name" + ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) + () ] @ allowed_and_current_operations operations @ [ @@ -7370,7 +7409,10 @@ module Blob = struct ~contents: [ uid _blob - ; namespace ~name:"name" ~contents:(names oss_since_303 RW) () + ; namespace ~name:"name" + ~contents: + (names oss_since_303 RW ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "size" "Size of the binary data, in bytes" ; field ~writer_roles:_R_POOL_OP ~qualifier:RW @@ -8648,7 +8690,9 @@ module Feature = struct ~contents: [ uid _feature ~lifecycle:[(Published, rel_falcon, "")] - ; namespace ~name:"name" ~contents:(names None StaticRO) () + ; namespace ~name:"name" + ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[(Published, rel_falcon, "")] ~default_value:(Some (VBool false)) "enabled" diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index c6648b0b9b4..d34daafc08a 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -2454,7 +2454,9 @@ let t = ~contents: ([ uid _host - ; namespace ~name:"name" ~contents:(names None RW) () + ; namespace ~name:"name" + ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) + () ; namespace ~name:"memory" ~contents:host_memory () ] @ allowed_and_current_operations operations diff --git a/ocaml/idl/datamodel_observer.ml b/ocaml/idl/datamodel_observer.ml index 1d80d030a62..523c63aef7c 100644 --- a/ocaml/idl/datamodel_observer.ml +++ b/ocaml/idl/datamodel_observer.ml @@ -114,7 +114,9 @@ let t = ~contents: ([ uid _observer ~lifecycle:[] - ; namespace ~name:"name" ~contents:(names None RW) () + ; namespace ~name:"name" + ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) + () ] @ [ field ~qualifier:StaticRO ~ty:(Set (Ref _host)) ~lifecycle:[] "hosts" diff --git a/ocaml/idl/datamodel_repository.ml b/ocaml/idl/datamodel_repository.ml index 114242d913f..2142084c984 100644 --- a/ocaml/idl/datamodel_repository.ml +++ b/ocaml/idl/datamodel_repository.ml @@ -182,7 +182,12 @@ let t = [ uid _repository 
~lifecycle:[(Published, "1.301.0", "")] ; namespace ~name:"name" - ~contents:(names ~writer_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) None RW) + ~contents: + (names + ~writer_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) + ~lifecycle:[(Published, rel_rio, "")] + None RW + ) () ; field ~qualifier:StaticRO ~lifecycle:[(Published, "1.301.0", "")] diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index acbf67df3f8..4815a29c1ce 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -2496,7 +2496,10 @@ let t = ([uid _vm] @ allowed_and_current_operations operations @ [ - namespace ~name:"name" ~contents:(names oss_since_303 RW) () + namespace ~name:"name" + ~contents: + (names oss_since_303 RW ~lifecycle:[(Published, rel_rio, "")]) + () ; field ~writer_roles:_R_VM_OP ~qualifier:StaticRO ~default_value:(Some (VEnum "Halted")) ~lifecycle: diff --git a/ocaml/idl/datamodel_vm_group.ml b/ocaml/idl/datamodel_vm_group.ml index 58016a31d0a..efc86791bd8 100644 --- a/ocaml/idl/datamodel_vm_group.ml +++ b/ocaml/idl/datamodel_vm_group.ml @@ -33,7 +33,9 @@ let t = ~contents: [ uid _vm_group - ; namespace ~name:"name" ~contents:(names None RW) () + ; namespace ~name:"name" + ~contents:(names ~lifecycle:[(Published, rel_rio, "")] None RW) + () ; field ~qualifier:StaticRO ~lifecycle:[] ~ty:placement_policy "placement" ~default_value:(Some (VEnum "normal")) "The placement policy of the VM group" diff --git a/ocaml/idl/schematest.ml b/ocaml/idl/schematest.ml index 611dc17f605..016a90960f3 100644 --- a/ocaml/idl/schematest.ml +++ b/ocaml/idl/schematest.ml @@ -3,7 +3,7 @@ let hash x = Digest.string x |> Digest.to_hex (* BEWARE: if this changes, check that schema has been bumped accordingly in ocaml/idl/datamodel_common.ml, usually schema_minor_vsn *) -let last_known_schema_hash = "5f1637f4ddfaa2a0dfb6cfc318451855" +let last_known_schema_hash = "8fcd8892ec0c7d130b0da44c5fd3990b" let current_schema_hash : string = let open Datamodel_types in From 9790ccb407e5e50d9a9f5721c31c99b6fd41f38b Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 24 Sep 2024 08:35:24 +0100 Subject: [PATCH 039/141] idl: Convert {in_product,internal_deprecated}_since parameters to lifecycle for fields These were already being replaced with lifecycle equivalents in datamodel_common, search and replace them all and remove parameters as such. 
Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel.ml | 2278 ++++++++++++++++++++++++------- ocaml/idl/datamodel_common.ml | 22 +- ocaml/idl/datamodel_host.ml | 307 ++++- ocaml/idl/datamodel_pool.ml | 343 ++++- ocaml/idl/datamodel_vm.ml | 556 ++++++-- ocaml/idl/datamodel_vm_group.ml | 2 + ocaml/idl/datamodel_vtpm.ml | 23 +- 7 files changed, 2837 insertions(+), 694 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 1e813b17d1f..22b918a8a52 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -257,56 +257,159 @@ module Session = struct ~contents: [ uid _session - ; field ~qualifier:DynamicRO ~ty:(Ref _host) ~in_product_since:rel_rio + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ; field ~qualifier:DynamicRO ~ty:(Ref _host) + ~lifecycle:[(Published, rel_rio, "Currently connected host")] "this_host" "Currently connected host" - ; field ~qualifier:DynamicRO ~ty:(Ref _user) ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:(Ref _user) + ~lifecycle:[(Published, rel_rio, "Currently connected user")] "this_user" "Currently connected user" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Timestamp for last time session was active" + ) + ] "last_active" "Timestamp for last time session was active" ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None - ~in_product_since:rel_rio "pool" + ~lifecycle: + [ + ( Published + , rel_rio + , "True if this session relates to a intra-pool login, false \ + otherwise" + ) + ] + "pool" "True if this session relates to a intra-pool login, false \ otherwise" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO - ~default_value:(Some (VBool false)) ~ty:Bool "is_local_superuser" + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "true iff this session was created using local superuser \ + credentials" + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VBool false)) ~ty:Bool + "is_local_superuser" "true iff this session was created using local superuser \ credentials" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO - ~default_value:(Some (VRef null_ref)) ~ty:(Ref _subject) "subject" + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "references the subject instance that created the session. \ + If a session instance has is_local_superuser set, then the \ + value of this field is undefined." + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) + ~ty:(Ref _subject) "subject" "references the subject instance that created the session. If a \ session instance has is_local_superuser set, then the value of \ this field is undefined." 
- ; field ~in_product_since:rel_george ~qualifier:DynamicRO - ~default_value:(Some (VDateTime Date.epoch)) ~ty:DateTime - "validation_time" "time when session was last validated" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO - ~default_value:(Some (VString "")) ~ty:String "auth_user_sid" + ; field + ~lifecycle: + [(Published, rel_george, "time when session was last validated")] + ~qualifier:DynamicRO ~default_value:(Some (VDateTime Date.epoch)) + ~ty:DateTime "validation_time" + "time when session was last validated" + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "the subject identifier of the user that was externally \ + authenticated. If a session instance has is_local_superuser \ + set, then the value of this field is undefined." + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String + "auth_user_sid" "the subject identifier of the user that was externally \ authenticated. If a session instance has is_local_superuser set, \ then the value of this field is undefined." - ; field ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO - ~default_value:(Some (VString "")) ~ty:String "auth_user_name" + ; field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "the subject name of the user that was externally \ + authenticated. If a session instance has is_local_superuser \ + set, then the value of this field is undefined." + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String + "auth_user_name" "the subject name of the user that was externally authenticated. \ If a session instance has is_local_superuser set, then the value \ of this field is undefined." - ; field ~in_product_since:rel_midnight_ride ~qualifier:StaticRO - ~default_value:(Some (VSet [])) ~ty:(Set String) "rbac_permissions" - "list with all RBAC permissions for this session" - ; field ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO - ~ty:(Set (Ref _task)) "tasks" + ; field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "list with all RBAC permissions for this session" + ) + ] + ~qualifier:StaticRO ~default_value:(Some (VSet [])) ~ty:(Set String) + "rbac_permissions" "list with all RBAC permissions for this session" + ; field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "list of tasks created using the current session" + ) + ] + ~qualifier:DynamicRO ~ty:(Set (Ref _task)) "tasks" "list of tasks created using the current session" - ; field ~in_product_since:rel_midnight_ride ~qualifier:StaticRO - ~default_value:(Some (VRef null_ref)) ~ty:(Ref _session) "parent" + ; field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "references the parent session that created this session" + ) + ] + ~qualifier:StaticRO ~default_value:(Some (VRef null_ref)) + ~ty:(Ref _session) "parent" "references the parent session that created this session" - ; field ~in_product_since:rel_clearwater ~qualifier:DynamicRO - ~default_value:(Some (VString "")) ~ty:String "originator" + ; field + ~lifecycle: + [ + ( Published + , rel_clearwater + , "a key string provided by a API user to distinguish itself \ + from other users sharing the same login name" + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String + "originator" "a key string provided by a API user to distinguish itself from \ other users sharing the same login name" - ; field ~in_product_since:"21.2.0" ~qualifier:DynamicRO - ~default_value:(Some (VBool false)) ~ty:Bool "client_certificate" + ; field + ~lifecycle: + [ + ( Published + , "21.2.0" + , "indicates 
whether this session was authenticated using a \ + client certificate" + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VBool false)) ~ty:Bool + "client_certificate" "indicates whether this session was authenticated using a client \ certificate" ] @@ -468,6 +571,8 @@ module Task = struct ~contents: ([ uid _task + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] ; namespace ~name:"name" ~contents: (names @@ -478,56 +583,142 @@ module Task = struct ] @ allowed_and_current_operations task_allowed_operations @ [ - field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle:[(Published, rel_rio, "Time task was created")] "created" "Time task was created" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time task finished (i.e. succeeded or failed). If \ + task-status is pending, then the value of this field has \ + no meaning" + ) + ] "finished" "Time task finished (i.e. succeeded or failed). If task-status \ is pending, then the value of this field has no meaning" - ; field ~qualifier:DynamicRO ~ty:status_type ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:status_type + ~lifecycle:[(Published, rel_rio, "current status of the task")] "status" "current status of the task" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "the session that created the task")] ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _session) "session" "the session that created the task" - ; field ~qualifier:DynamicRO ~ty:(Ref _host) ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:(Ref _host) + ~lifecycle: + [(Published, rel_rio, "the host on which the task is running")] "resident_on" "the host on which the task is running" - ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Float + ~lifecycle: + [ + ( Published + , rel_rio + , "This field contains the estimated fraction of the task \ + which is complete. This field should not be used to \ + determine whether the task is complete - for this the \ + status field of the task should be used." + ) + ] "progress" "This field contains the estimated fraction of the task which is \ complete. This field should not be used to determine whether \ the task is complete - for this the status field of the task \ should be used." - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "If the task has spawned a program, the field record the \ + PID of the process that the task is waiting on. (-1 if no \ + waiting completion of an external program )" + ) + ] ~internal_only:true ~qualifier:DynamicRO ~ty:Int "externalpid" "If the task has spawned a program, the field record the PID of \ the process that the task is waiting on. 
(-1 if no waiting \ completion of an external program )" - ; field ~in_oss_since:None ~in_product_since:rel_rio - ~internal_deprecated_since:rel_boston ~internal_only:true - ~qualifier:DynamicRO ~ty:Int "stunnelpid" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "If the task has been forwarded, this field records the \ + pid of the stunnel process spawned to manage the \ + forwarding connection" + ) + ; (Deprecated, rel_boston, "") + ] + ~internal_only:true ~qualifier:DynamicRO ~ty:Int "stunnelpid" "If the task has been forwarded, this field records the pid of \ the stunnel process spawned to manage the forwarding connection" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "True if this task has been forwarded to a slave" + ) + ] ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "forwarded" "True if this task has been forwarded to a slave" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "The host to which the task has been forwarded" + ) + ] ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _host) "forwarded_to" "The host to which the task has been forwarded" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "if the task has completed successfully, this field \ + contains the type of the encoded result (i.e. name of the \ + class whose reference is in the result field). Undefined \ + otherwise." + ) + ] "type" "if the task has completed successfully, this field contains the \ type of the encoded result (i.e. name of the class whose \ reference is in the result field). Undefined otherwise." - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "if the task has completed successfully, this field \ + contains the result value (either Void or an object \ + reference). Undefined otherwise." + ) + ] "result" "if the task has completed successfully, this field contains the \ result value (either Void or an object reference). Undefined \ otherwise." ; field ~qualifier:DynamicRO ~ty:(Set String) - ~in_product_since:rel_rio "error_info" + ~lifecycle: + [ + ( Published + , rel_rio + , "if the task has failed, this field contains the set of \ + associated error strings. Undefined otherwise." + ) + ] + "error_info" "if the task has failed, this field contains the set of \ associated error strings. Undefined otherwise." - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ~map_keys_roles: @@ -536,13 +727,27 @@ module Task = struct ; ("XenCenterUUID", _R_VM_OP) ; ("XenCenterMeddlingActionTitle", _R_VM_OP) ] - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_orlando + , "Ref pointing to the task this is a substask of." + ) + ] ~default_value:(Some (VRef "")) ~ty:(Ref _task) "subtask_of" "Ref pointing to the task this is a substask of." 
- ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + (Published, rel_orlando, "List pointing to all the substasks.") + ] ~ty:(Set (Ref _task)) "subtasks" "List pointing to all the substasks." - ; field ~qualifier:DynamicRO ~in_product_since:rel_dundee ~ty:String + ; field ~qualifier:DynamicRO + ~lifecycle: + [(Published, rel_dundee, "Function call trace for debugging.")] + ~ty:String ~default_value: (Some (VString (Sexplib0.Sexp.to_string Backtrace.(sexp_of_t empty)) @@ -592,10 +797,17 @@ module User = struct ~contents: [ uid _user - ; field ~qualifier:StaticRO ~in_product_since:rel_rio "short_name" - "short name (e.g. userid)" - ; field ~in_product_since:rel_rio "fullname" "full name" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ; field ~qualifier:StaticRO + ~lifecycle:[(Published, rel_rio, "short name (e.g. userid)")] + "short_name" "short name (e.g. userid)" + ; field + ~lifecycle:[(Published, rel_rio, "full name")] + "fullname" "full name" + ; field + ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -650,18 +862,27 @@ module Host_crashdump = struct ~messages_default_allowed_roles:_R_POOL_OP ~messages:[destroy; upload] ~contents: [ - uid ~in_oss_since:None _host_crashdump - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO - ~ty:(Ref _host) "host" "Host the crashdump relates to" - ; field ~in_oss_since:None ~in_product_since:rel_rio + uid ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _host_crashdump + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Host the crashdump relates to")] + ~qualifier:StaticRO ~ty:(Ref _host) "host" + "Host the crashdump relates to" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Time the crash happened")] ~qualifier:DynamicRO ~ty:DateTime "timestamp" "Time the crash happened" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Size of the crashdump")] ~qualifier:DynamicRO ~ty:Int "size" "Size of the crashdump" ; field ~qualifier:StaticRO ~ty:String ~in_oss_since:None - ~in_product_since:rel_rio ~internal_only:true "filename" - "filename of crash dir" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ~lifecycle:[(Published, rel_rio, "filename of crash dir")] + ~internal_only:true "filename" "filename of crash dir" + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -848,34 +1069,61 @@ module Pool_update = struct ] ~contents: [ - uid ~in_oss_since:None _pool_update + uid ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _pool_update ; namespace ~name:"name" ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) () - ; field ~in_product_since:rel_ely ~default_value:(Some (VString "")) - ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" - "Update version number" - ; field ~in_product_since:rel_ely + ; field + ~lifecycle:[(Published, rel_ely, "Update version number")] + ~default_value:(Some (VString "")) ~in_oss_since:None + ~qualifier:StaticRO ~ty:String "version" "Update version 
number" + ; field + ~lifecycle:[(Published, rel_ely, "Size of the update in bytes")] ~default_value:(Some (VInt Int64.zero)) ~in_oss_since:None ~qualifier:StaticRO ~ty:Int "installation_size" "Size of the update in bytes" - ; field ~in_product_since:rel_ely ~default_value:(Some (VString "")) - ~in_oss_since:None ~qualifier:StaticRO ~ty:String "key" - "GPG key of the update" - ; field ~in_product_since:rel_ely ~default_value:(Some (VSet [])) - ~in_oss_since:None ~qualifier:StaticRO - ~ty:(Set after_apply_guidance) "after_apply_guidance" + ; field + ~lifecycle:[(Published, rel_ely, "GPG key of the update")] + ~default_value:(Some (VString "")) ~in_oss_since:None + ~qualifier:StaticRO ~ty:String "key" "GPG key of the update" + ; field + ~lifecycle: + [ + ( Published + , rel_ely + , "What the client should do after this update has been \ + applied." + ) + ] + ~default_value:(Some (VSet [])) ~in_oss_since:None + ~qualifier:StaticRO ~ty:(Set after_apply_guidance) + "after_apply_guidance" "What the client should do after this update has been applied." - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO - ~ty:(Ref _vdi) "vdi" "VDI the update was uploaded to" - ; field ~in_product_since:rel_ely ~in_oss_since:None - ~qualifier:DynamicRO ~ty:(Set (Ref _host)) "hosts" - "The hosts that have applied this update." - ; field ~in_product_since:rel_inverness ~default_value:(Some (VMap [])) - ~in_oss_since:None + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "VDI the update was uploaded to")] + ~qualifier:StaticRO ~ty:(Ref _vdi) "vdi" + "VDI the update was uploaded to" + ; field + ~lifecycle: + [(Published, rel_ely, "The hosts that have applied this update.")] + ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host)) + "hosts" "The hosts that have applied this update." 
+ ; field + ~lifecycle:[(Published, rel_inverness, "additional configuration")] + ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map (String, String)) "other_config" "additional configuration" - ; field ~in_product_since:rel_inverness + ; field + ~lifecycle: + [ + ( Published + , rel_inverness + , "Flag - if true, all hosts in a pool must apply this update" + ) + ] ~default_value:(Some (VBool false)) ~in_oss_since:None ~qualifier:StaticRO ~ty:Bool "enforce_homogeneity" "Flag - if true, all hosts in a pool must apply this update" @@ -1044,34 +1292,68 @@ module Pool_patch = struct [apply; pool_apply; precheck; clean; pool_clean; destroy; clean_on_host] ~contents: [ - uid ~in_oss_since:None _pool_patch + uid ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _pool_patch ; namespace ~name:"name" ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) () - ; field ~in_product_since:rel_miami ~default_value:(Some (VString "")) - ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" - "Patch version number" - ; field ~in_product_since:rel_miami ~default_value:(Some (VString "")) - ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO - ~ty:String "filename" "Filename of the patch" - ; field ~in_product_since:rel_miami + ; field + ~lifecycle:[(Published, rel_miami, "Patch version number")] + ~default_value:(Some (VString "")) ~in_oss_since:None + ~qualifier:StaticRO ~ty:String "version" "Patch version number" + ; field + ~lifecycle:[(Published, rel_miami, "Filename of the patch")] + ~default_value:(Some (VString "")) ~in_oss_since:None + ~internal_only:true ~qualifier:DynamicRO ~ty:String "filename" + "Filename of the patch" + ; field + ~lifecycle:[(Published, rel_miami, "Size of the patch")] ~default_value:(Some (VInt Int64.zero)) ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch" - ; field ~in_product_since:rel_miami ~default_value:(Some (VBool false)) - ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "pool_applied" + ; field + ~lifecycle: + [ + ( Published + , rel_miami + , "This patch should be applied across the entire pool" + ) + ] + ~default_value:(Some (VBool false)) ~in_oss_since:None + ~qualifier:DynamicRO ~ty:Bool "pool_applied" "This patch should be applied across the entire pool" - ; field ~in_product_since:rel_miami ~in_oss_since:None - ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "host_patches" - "This hosts this patch is applied to." - ; field ~in_product_since:rel_miami ~default_value:(Some (VSet [])) - ~in_oss_since:None ~qualifier:DynamicRO - ~ty:(Set after_apply_guidance) "after_apply_guidance" + ; field + ~lifecycle: + [(Published, rel_miami, "This hosts this patch is applied to.")] + ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) + "host_patches" "This hosts this patch is applied to." + ; field + ~lifecycle: + [ + ( Published + , rel_miami + , "What the client should do after this patch has been applied." + ) + ] + ~default_value:(Some (VSet [])) ~in_oss_since:None + ~qualifier:DynamicRO ~ty:(Set after_apply_guidance) + "after_apply_guidance" "What the client should do after this patch has been applied." 
- ; field ~in_product_since:rel_ely ~default_value:(Some (VRef null_ref)) - ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _pool_update) - "pool_update" "A reference to the associated pool_update object" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) - ~in_oss_since:None + ; field + ~lifecycle: + [ + ( Published + , rel_ely + , "A reference to the associated pool_update object" + ) + ] + ~default_value:(Some (VRef null_ref)) ~in_oss_since:None + ~qualifier:StaticRO ~ty:(Ref _pool_update) "pool_update" + "A reference to the associated pool_update object" + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -1125,29 +1407,43 @@ module Host_patch = struct ~messages_default_allowed_roles:_R_POOL_OP ~messages:[destroy; apply] ~contents: [ - uid ~in_oss_since:None _host_patch + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ~in_oss_since:None _host_patch ; namespace ~name:"name" ~contents:(names None StaticRO ~lifecycle:[(Published, rel_rio, "")]) () - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO - ~ty:String "version" "Patch version number" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO - ~ty:(Ref _host) "host" "Host the patch relates to" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~internal_only:true - ~qualifier:DynamicRO ~ty:String "filename" "Filename of the patch" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Patch version number")] + ~qualifier:StaticRO ~ty:String "version" "Patch version number" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Host the patch relates to")] + ~qualifier:StaticRO ~ty:(Ref _host) "host" + "Host the patch relates to" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Filename of the patch")] + ~internal_only:true ~qualifier:DynamicRO ~ty:String "filename" + "Filename of the patch" + ; field ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "True if the patch has been applied")] ~qualifier:DynamicRO ~ty:Bool "applied" "True if the patch has been applied" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Time the patch was applied")] ~qualifier:DynamicRO ~ty:DateTime "timestamp_applied" "Time the patch was applied" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Size of the patch")] ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch" - ; field ~in_product_since:rel_miami ~in_oss_since:None - ~qualifier:StaticRO ~ty:(Ref _pool_patch) + ; field + ~lifecycle:[(Published, rel_miami, "The patch applied")] + ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _pool_patch) ~default_value:(Some (VRef "")) "pool_patch" "The patch applied" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) - ~in_oss_since:None + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -1158,8 +1454,9 @@ module Host_metrics = struct let host_metrics_memory = let field = field ~ty:Int in [ - field ~qualifier:DynamicRO ~in_product_since:rel_rio "total" - "Total host memory (bytes)" ~doc_tags:[Memory] + field 
~qualifier:DynamicRO + ~lifecycle:[(Published, rel_rio, "Total host memory (bytes)")] + "total" "Total host memory (bytes)" ~doc_tags:[Memory] ; field "free" "Free host memory (bytes)" ~default_value:(Some (VInt 0L)) ~lifecycle: [ @@ -1179,13 +1476,27 @@ module Host_metrics = struct ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[] ~contents: [ - uid _host_metrics + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _host_metrics ; namespace ~name:"memory" ~contents:host_metrics_memory () ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None "live" - ~in_product_since:rel_rio "Pool master thinks this host is live" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ~lifecycle: + [(Published, rel_rio, "Pool master thinks this host is live")] + "Pool master thinks this host is live" + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time at which this information was last updated" + ) + ] "last_updated" "Time at which this information was last updated" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -1210,33 +1521,65 @@ module Host_cpu = struct ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[] ~contents: [ - uid _hostcpu - ; field ~qualifier:DynamicRO ~ty:(Ref _host) ~in_product_since:rel_rio + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _hostcpu + ; field ~qualifier:DynamicRO ~ty:(Ref _host) + ~lifecycle:[(Published, rel_rio, "the host the CPU is in")] "host" "the host the CPU is in" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "number" - "the number of the physical CPU within the host" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "the number of the physical CPU within the host" + ) + ] + "number" "the number of the physical CPU within the host" + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "the vendor of the physical CPU")] "vendor" "the vendor of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "speed" - "the speed of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle:[(Published, rel_rio, "the speed of the physical CPU")] + "speed" "the speed of the physical CPU" + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [(Published, rel_rio, "the model name of the physical CPU")] "modelname" "the model name of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "family" - "the family (number) of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "model" - "the model number of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [(Published, rel_rio, "the family (number) of the physical CPU")] + "family" "the family (number) of the physical CPU" + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [(Published, rel_rio, "the model number of the physical CPU")] + "model" "the model number of the physical CPU" + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [(Published, 
rel_rio, "the stepping of the physical CPU")] "stepping" "the stepping of the physical CPU" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "the flags of the physical CPU (a decoded version of the \ + features field)" + ) + ] "flags" "the flags of the physical CPU (a decoded version of the features \ field)" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "the physical CPU feature bitmap")] "features" "the physical CPU feature bitmap" ; field ~qualifier:DynamicRO ~persist:false ~ty:Float - ~in_product_since:rel_rio "utilisation" - "the current CPU utilisation" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ~lifecycle:[(Published, rel_rio, "the current CPU utilisation")] + "utilisation" "the current CPU utilisation" + ; field + ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -1246,12 +1589,16 @@ end (** Disk and network interfaces are associated with QoS parameters: *) let qos devtype = [ - field ~in_product_since:rel_rio "algorithm_type" "QoS algorithm to use" + field + ~lifecycle:[(Published, rel_rio, "QoS algorithm to use")] + "algorithm_type" "QoS algorithm to use" ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "algorithm_params" - "parameters for chosen QoS algorithm" - ; field ~qualifier:DynamicRO ~ty:(Set String) ~in_product_since:rel_rio + ~lifecycle:[(Published, rel_rio, "parameters for chosen QoS algorithm")] + "algorithm_params" "parameters for chosen QoS algorithm" + ; field ~qualifier:DynamicRO ~ty:(Set String) + ~lifecycle: + [(Published, rel_rio, "supported QoS algorithms for this " ^ devtype)] "supported_algorithms" ("supported QoS algorithms for this " ^ devtype) ] @@ -1548,7 +1895,10 @@ module Network = struct ] ~contents: ([ - uid _network + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _network ; namespace ~name:"name" ~contents: (names ~writer_roles:_R_POOL_OP @@ -1560,11 +1910,14 @@ module Network = struct @ allowed_and_current_operations ~writer_roles:_R_POOL_OP operations @ [ field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) - ~in_product_since:rel_rio "VIFs" "list of connected vifs" + ~lifecycle:[(Published, rel_rio, "list of connected vifs")] + "VIFs" "list of connected vifs" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) - ~in_product_since:rel_rio "PIFs" "list of connected pifs" + ~lifecycle:[(Published, rel_rio, "list of connected pifs")] + "PIFs" "list of connected pifs" ; field ~qualifier:RW ~ty:Int ~default_value:(Some (VInt 1500L)) - ~in_product_since:rel_midnight_ride "MTU" "MTU in octets" + ~lifecycle:[(Published, rel_midnight_ride, "MTU in octets")] + "MTU" "MTU in octets" ; field ~writer_roles:_R_POOL_OP ~ty:(Map (String, String)) "other_config" "additional configuration" @@ -1574,7 +1927,7 @@ module Network = struct ; ("XenCenter.CustomFields.*", _R_VM_OP) ; ("XenCenterCreateInProgress", _R_VM_OP) ] - ~in_product_since:rel_rio + ~lifecycle:[(Published, rel_rio, "additional configuration")] ; field ~lifecycle: [ @@ -1592,25 +1945,62 @@ module Network = struct ~lifecycle:[(Published, rel_falcon, "")] ~qualifier:StaticRO ~ty:Bool ~default_value:(Some (VBool true)) "managed" "true if the bridge is managed by xapi" - ; field ~qualifier:DynamicRO 
~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_orlando + , "Binary blobs associated with this network" + ) + ] ~ty:(Map (String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this network" - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando + ; field ~writer_roles:_R_VM_OP + ~lifecycle: + [ + ( Published + , rel_orlando + , "user-specified tags for categorization purposes" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" - ; field ~qualifier:DynamicRO ~in_product_since:rel_tampa + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_tampa + , "The network will use this value to determine the \ + behaviour of all VIFs where locking_mode = default" + ) + ] ~default_value:(Some (VEnum "unlocked")) ~ty:default_locking_mode "default_locking_mode" "The network will use this value to determine the behaviour of \ all VIFs where locking_mode = default" - ; field ~qualifier:DynamicRO ~in_product_since:rel_creedence + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_creedence + , "The IP addresses assigned to VIFs on networks that have \ + active xapi-managed DHCP" + ) + ] ~default_value:(Some (VMap [])) ~ty:(Map (Ref _vif, String)) "assigned_ips" "The IP addresses assigned to VIFs on networks that have active \ xapi-managed DHCP" - ; field ~qualifier:DynamicRO ~in_product_since:rel_inverness + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_inverness + , "Set of purposes for which the server will use this network" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set purpose) "purpose" "Set of purposes for which the server will use this network" ] @@ -2143,77 +2533,195 @@ module PIF = struct ] ~contents: [ - uid _pif + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _pif ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~in_product_since:rel_rio "device" - "machine-readable name of the interface (e.g. eth0)" - ; field ~qualifier:StaticRO ~ty:(Ref _network) ~in_product_since:rel_rio + field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_rio + , "machine-readable name of the interface (e.g. eth0)" + ) + ] + "device" "machine-readable name of the interface (e.g. 
eth0)" + ; field ~qualifier:StaticRO ~ty:(Ref _network) + ~lifecycle: + [ + ( Published + , rel_rio + , "virtual network to which this pif is connected" + ) + ] "network" "virtual network to which this pif is connected" - ; field ~qualifier:StaticRO ~ty:(Ref _host) ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:(Ref _host) + ~lifecycle: + [ + ( Published + , rel_rio + , "physical machine to which this pif is connected" + ) + ] "host" "physical machine to which this pif is connected" ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~in_product_since:rel_rio "MAC" - "ethernet MAC address of physical interface" + field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_rio + , "ethernet MAC address of physical interface" + ) + ] + "MAC" "ethernet MAC address of physical interface" ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio "MTU" - "MTU in octets" + field ~qualifier:StaticRO ~ty:Int + ~lifecycle:[(Published, rel_rio, "MTU in octets")] + "MTU" "MTU in octets" ; (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio "VLAN" - "VLAN tag for all traffic passing through this interface" - ; field ~in_oss_since:None ~internal_only:true ~in_product_since:rel_rio + field ~qualifier:StaticRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "VLAN tag for all traffic passing through this interface" + ) + ] + "VLAN" "VLAN tag for all traffic passing through this interface" + ; field ~in_oss_since:None ~internal_only:true + ~lifecycle:[(Published, rel_rio, "actual dom0 device name")] "device_name" "actual dom0 device name" ; field ~qualifier:DynamicRO ~ty:(Ref _pif_metrics) - ~in_product_since:rel_rio "metrics" - "metrics associated with this PIF" - ; field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami + ~lifecycle: + [(Published, rel_rio, "metrics associated with this PIF")] + "metrics" "metrics associated with this PIF" + ; field ~in_oss_since:None ~ty:Bool + ~lifecycle: + [ + ( Published + , rel_miami + , "true if this represents a physical network interface" + ) + ] ~qualifier:DynamicRO "physical" "true if this represents a physical network interface" ~default_value:(Some (VBool false)) - ; field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:Bool + ~lifecycle: + [(Published, rel_miami, "true if this interface is online")] ~qualifier:DynamicRO "currently_attached" "true if this interface is online" ~default_value:(Some (VBool true)) ; field ~in_oss_since:None ~ty:ip_configuration_mode - ~in_product_since:rel_miami ~qualifier:DynamicRO - "ip_configuration_mode" + ~lifecycle: + [ + ( Published + , rel_miami + , "Sets if and how this interface gets an IP address" + ) + ] + ~qualifier:DynamicRO "ip_configuration_mode" "Sets if and how this interface gets an IP address" ~default_value:(Some (VEnum "None")) - ; field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:String + ~lifecycle:[(Published, rel_miami, "IP address")] ~qualifier:DynamicRO "IP" "IP address" ~default_value:(Some (VString "")) - ; field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:String + ~lifecycle:[(Published, rel_miami, "IP netmask")] ~qualifier:DynamicRO "netmask" "IP netmask" ~default_value:(Some (VString "")) - ; field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:String + 
~lifecycle:[(Published, rel_miami, "IP gateway")] ~qualifier:DynamicRO "gateway" "IP gateway" ~default_value:(Some (VString "")) - ; field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:String + ~lifecycle: + [ + ( Published + , rel_miami + , "Comma separated list of the IP addresses of the DNS servers \ + to use" + ) + ] ~qualifier:DynamicRO "DNS" "Comma separated list of the IP addresses of the DNS servers to use" ~default_value:(Some (VString "")) - ; field ~in_oss_since:None ~ty:(Ref _bond) ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:(Ref _bond) + ~lifecycle: + [ + ( Published + , rel_miami + , "Indicates which bond this interface is part of" + ) + ] ~qualifier:DynamicRO "bond_slave_of" "Indicates which bond this interface is part of" ~default_value:(Some (VRef "")) ; field ~in_oss_since:None ~ty:(Set (Ref _bond)) - ~in_product_since:rel_miami ~qualifier:DynamicRO "bond_master_of" + ~lifecycle: + [ + ( Published + , rel_miami + , "Indicates this PIF represents the results of a bond" + ) + ] + ~qualifier:DynamicRO "bond_master_of" "Indicates this PIF represents the results of a bond" - ; field ~in_oss_since:None ~ty:(Ref _vlan) ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:(Ref _vlan) + ~lifecycle: + [ + ( Published + , rel_miami + , "Indicates which VLAN this interface receives untagged \ + traffic from" + ) + ] ~qualifier:DynamicRO "VLAN_master_of" "Indicates which VLAN this interface receives untagged traffic from" ~default_value:(Some (VRef "")) ; field ~in_oss_since:None ~ty:(Set (Ref _vlan)) - ~in_product_since:rel_miami ~qualifier:DynamicRO "VLAN_slave_of" + ~lifecycle: + [ + ( Published + , rel_miami + , "Indicates which VLANs this interface transmits tagged \ + traffic to" + ) + ] + ~qualifier:DynamicRO "VLAN_slave_of" "Indicates which VLANs this interface transmits tagged traffic to" - ; field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami + ; field ~in_oss_since:None ~ty:Bool + ~lifecycle: + [ + ( Published + , rel_miami + , "Indicates whether the control software is listening for \ + connections on this interface" + ) + ] ~qualifier:DynamicRO "management" "Indicates whether the control software is listening for \ connections on this interface" ~default_value:(Some (VBool false)) - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "Additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "Additional configuration" - ; field ~in_product_since:rel_orlando ~qualifier:DynamicRO - ~default_value:(Some (VBool false)) ~ty:Bool "disallow_unplug" + ; field + ~lifecycle: + [ + ( Published + , rel_orlando + , "Prevent this PIF from being unplugged; set this to notify \ + the management tool-stack that the PIF has a special use \ + and should not be unplugged under any circumstances (e.g. \ + because you're running storage traffic over it)" + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VBool false)) ~ty:Bool + "disallow_unplug" "Prevent this PIF from being unplugged; set this to notify the \ management tool-stack that the PIF has a special use and should \ not be unplugged under any circumstances (e.g. 
because you're \ @@ -2281,12 +2789,24 @@ module PIF = struct ~default_value:(Some (VEnum "unknown")) "igmp_snooping_status" "The IGMP snooping status of the corresponding network bridge" ; field ~in_oss_since:None ~ty:(Set (Ref _network_sriov)) - ~in_product_since:rel_kolkata ~qualifier:DynamicRO - "sriov_physical_PIF_of" + ~lifecycle: + [ + ( Published + , rel_kolkata + , "Indicates which network_sriov this interface is physical of" + ) + ] + ~qualifier:DynamicRO "sriov_physical_PIF_of" "Indicates which network_sriov this interface is physical of" ; field ~in_oss_since:None ~ty:(Set (Ref _network_sriov)) - ~in_product_since:rel_kolkata ~qualifier:DynamicRO - "sriov_logical_PIF_of" + ~lifecycle: + [ + ( Published + , rel_kolkata + , "Indicates which network_sriov this interface is logical of" + ) + ] + ~qualifier:DynamicRO "sriov_logical_PIF_of" "Indicates which network_sriov this interface is logical of" ; field ~qualifier:DynamicRO ~ty:(Ref _pci) ~lifecycle:[(Published, rel_kolkata, "")] @@ -2314,27 +2834,61 @@ module PIF_metrics = struct ~messages:[] ~contents: [ - uid _pif_metrics + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _pif_metrics ; namespace ~name:"io" ~contents:iobandwidth () - ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Bool + ~lifecycle: + [(Published, rel_rio, "Report if the PIF got a carrier or not")] "carrier" "Report if the PIF got a carrier or not" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "Report vendor ID")] "vendor_id" "Report vendor ID" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "Report vendor name")] "vendor_name" "Report vendor name" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "Report device ID")] "device_id" "Report device ID" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "Report device name")] "device_name" "Report device name" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "speed" - "Speed of the link in Mbit/s (if available)" - ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio "duplex" - "Full duplex capability of the link (if available)" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "Speed of the link in Mbit/s (if available)" + ) + ] + "speed" "Speed of the link in Mbit/s (if available)" + ; field ~qualifier:DynamicRO ~ty:Bool + ~lifecycle: + [ + ( Published + , rel_rio + , "Full duplex capability of the link (if available)" + ) + ] + "duplex" "Full duplex capability of the link (if available)" + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [(Published, rel_rio, "PCI bus path of the pif (if available)")] "pci_bus_path" "PCI bus path of the pif (if available)" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time at which this information was last updated" + ) + ] "last_updated" "Time at which this information was last updated" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ; field 
+ ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -2447,14 +3001,27 @@ module Bond = struct ~messages:[create; destroy; set_mode; set_property] ~contents: [ - uid _bond - ; field ~in_oss_since:None ~in_product_since:rel_miami + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _bond + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_miami, "The bonded interface")] ~qualifier:StaticRO ~ty:(Ref _pif) "master" "The bonded interface" ~default_value:(Some (VRef "")) - ; field ~in_oss_since:None ~in_product_since:rel_miami + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "The interfaces which are part of this bond" + ) + ] ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) "slaves" "The interfaces which are part of this bond" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ; field @@ -2469,12 +3036,22 @@ module Bond = struct ~qualifier:DynamicRO ~default_value:(Some (VEnum "balance-slb")) ~ty:mode "mode" "The algorithm used to distribute traffic among the bonded NICs" - ; field ~in_oss_since:None ~in_product_since:rel_tampa + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_tampa + , "Additional configuration properties specific to the bond \ + mode." + ) + ] ~qualifier:DynamicRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "properties" "Additional configuration properties specific to the bond mode." - ; field ~in_oss_since:None ~in_product_since:rel_tampa + ; field ~in_oss_since:None + ~lifecycle: + [(Published, rel_tampa, "Number of links up in this bond")] ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "links_up" "Number of links up in this bond" ; field @@ -2563,16 +3140,26 @@ module VLAN = struct ~messages:[pool_introduce; create; destroy] ~contents: [ - uid _vlan - ; field ~qualifier:StaticRO ~ty:(Ref _pif) ~in_product_since:rel_miami + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vlan + ; field ~qualifier:StaticRO ~ty:(Ref _pif) + ~lifecycle: + [(Published, rel_miami, "interface on which traffic is tagged")] "tagged_PIF" "interface on which traffic is tagged" ~default_value:(Some (VRef "")) - ; field ~qualifier:DynamicRO ~ty:(Ref _pif) ~in_product_since:rel_miami + ; field ~qualifier:DynamicRO ~ty:(Ref _pif) + ~lifecycle: + [(Published, rel_miami, "interface on which traffic is untagged")] "untagged_PIF" "interface on which traffic is untagged" ~default_value:(Some (VRef "")) - ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_miami "tag" - "VLAN tag in use" ~default_value:(Some (VInt (-1L))) - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field ~qualifier:StaticRO ~ty:Int + ~lifecycle:[(Published, rel_miami, "VLAN tag in use")] + "tag" "VLAN tag in use" ~default_value:(Some (VInt (-1L))) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -2737,19 +3324,53 @@ module PBD = struct ~messages:[plug; unplug; set_device_config] ~contents: [ - uid _pbd - ; field ~qualifier:StaticRO ~ty:(Ref _host) ~in_product_since:rel_rio + uid + ~lifecycle: + [(Published, rel_rio, 
"Unique identifier/object reference")] + _pbd + ; field ~qualifier:StaticRO ~ty:(Ref _host) + ~lifecycle: + [ + ( Published + , rel_rio + , "physical machine on which the pbd is available" + ) + ] "host" "physical machine on which the pbd is available" - ; field ~qualifier:StaticRO ~ty:(Ref _sr) ~in_product_since:rel_rio "SR" - "the storage repository that the pbd realises" + ; field ~qualifier:StaticRO ~ty:(Ref _sr) + ~lifecycle: + [ + ( Published + , rel_rio + , "the storage repository that the pbd realises" + ) + ] + "SR" "the storage repository that the pbd realises" ; field ~ty:(Map (String, String)) - ~qualifier:StaticRO "device_config" ~in_product_since:rel_rio + ~qualifier:StaticRO "device_config" + ~lifecycle: + [ + ( Published + , rel_rio + , "a config string to string map that is provided to the \ + host's SR-backend-driver" + ) + ] "a config string to string map that is provided to the host's \ SR-backend-driver" - ; field ~ty:Bool ~qualifier:DynamicRO ~in_product_since:rel_rio + ; field ~ty:Bool ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_rio + , "is the SR currently attached on this host?" + ) + ] "currently_attached" "is the SR currently attached on this host?" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -2770,17 +3391,35 @@ let device_status_fields = ) ] "currently_attached" "is the device currently attached (erased on reboot)" - ; field ~ty:Int ~qualifier:DynamicRO ~in_product_since:rel_rio "status_code" + ; field ~ty:Int ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_rio + , "error/success code associated with last attach-operation (erased \ + on reboot)" + ) + ] + "status_code" "error/success code associated with last attach-operation (erased on \ reboot)" - ; field ~ty:String ~qualifier:DynamicRO ~in_product_since:rel_rio + ; field ~ty:String ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_rio + , "error/success information associated with last attach-operation \ + status (erased on reboot)" + ) + ] "status_detail" "error/success information associated with last attach-operation status \ (erased on reboot)" ; field ~ty:(Map (String, String)) - ~qualifier:DynamicRO ~in_product_since:rel_rio "runtime_properties" - "Device runtime properties" + ~qualifier:DynamicRO + ~lifecycle:[(Published, rel_rio, "Device runtime properties")] + "runtime_properties" "Device runtime properties" ] module VIF = struct @@ -3134,27 +3773,69 @@ module VIF = struct ; configure_ipv6 ] ~contents: - ([uid _vif] + ([ + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vif + ] @ allowed_and_current_operations operations @ [ - field ~qualifier:StaticRO ~in_product_since:rel_rio "device" - "order in which VIF backends are created by xapi" + field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_rio + , "order in which VIF backends are created by xapi" + ) + ] + "device" "order in which VIF backends are created by xapi" ; field ~qualifier:StaticRO ~ty:(Ref _network) - ~in_product_since:rel_rio "network" - "virtual network to which this vif is connected" - ; field ~qualifier:StaticRO ~ty:(Ref _vm) ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "virtual network to which this vif is connected" + ) + ] + "network" "virtual network to which this vif is connected" + ; field 
~qualifier:StaticRO ~ty:(Ref _vm) + ~lifecycle: + [ + ( Published + , rel_rio + , "virtual machine to which this vif is connected" + ) + ] "VM" "virtual machine to which this vif is connected" - ; field ~qualifier:StaticRO ~ty:String ~in_product_since:rel_rio "MAC" + ; field ~qualifier:StaticRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "ethernet MAC address of virtual interface, as exposed to \ + guest" + ) + ] + "MAC" "ethernet MAC address of virtual interface, as exposed to guest" - ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio "MTU" - "MTU in octets" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:Int + ~lifecycle:[(Published, rel_rio, "MTU in octets")] + "MTU" "MTU in octets" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "true if the VIF is reserved pending a reboot/migrate" + ) + ] ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "reserved" "true if the VIF is reserved pending a reboot/migrate" ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other_config" - "additional configuration" + ~lifecycle:[(Published, rel_rio, "additional configuration")] + "other_config" "additional configuration" ] @ device_status_fields @ [namespace ~name:"qos" ~contents:(qos "VIF") ()] @@ -3168,44 +3849,113 @@ module VIF = struct ; (Removed, rel_tampa, "Disabled in favour of RRDs") ] "metrics" "metrics associated with this VIF" - ; field ~qualifier:DynamicRO ~in_product_since:rel_george + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_george + , "true if the MAC was autogenerated; false indicates it was \ + set manually" + ) + ] ~default_value:(Some (VBool false)) ~ty:Bool "MAC_autogenerated" "true if the MAC was autogenerated; false indicates it was set \ manually" - ; field ~qualifier:StaticRO ~in_product_since:rel_tampa + ; field ~qualifier:StaticRO + ~lifecycle: + [(Published, rel_tampa, "current locking mode of the VIF")] ~default_value:(Some (VEnum "network_default")) ~ty:locking_mode "locking_mode" "current locking mode of the VIF" - ; field ~qualifier:StaticRO ~in_product_since:rel_tampa + ; field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_tampa + , "A list of IPv4 addresses which can be used to filter \ + traffic passing through this VIF" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "ipv4_allowed" "A list of IPv4 addresses which can be used to filter traffic \ passing through this VIF" - ; field ~qualifier:StaticRO ~in_product_since:rel_tampa + ; field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_tampa + , "A list of IPv6 addresses which can be used to filter \ + traffic passing through this VIF" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "ipv6_allowed" "A list of IPv6 addresses which can be used to filter traffic \ passing through this VIF" - ; field ~ty:ipv4_configuration_mode ~in_product_since:rel_dundee + ; field ~ty:ipv4_configuration_mode + ~lifecycle: + [ + ( Published + , rel_dundee + , "Determines whether IPv4 addresses are configured on the \ + VIF" + ) + ] ~qualifier:DynamicRO "ipv4_configuration_mode" "Determines whether IPv4 addresses are configured on the VIF" ~default_value:(Some (VEnum "None")) - ; field ~ty:(Set String) ~in_product_since:rel_dundee + ; field ~ty:(Set String) + ~lifecycle: + [(Published, rel_dundee, "IPv4 addresses in CIDR format")] ~qualifier:DynamicRO "ipv4_addresses" "IPv4 addresses in CIDR format" ~default_value:(Some (VSet [])) - ; field ~ty:String 
~in_product_since:rel_dundee ~qualifier:DynamicRO - "ipv4_gateway" + ; field ~ty:String + ~lifecycle: + [ + ( Published + , rel_dundee + , "IPv4 gateway (the empty string means that no gateway is \ + set)" + ) + ] + ~qualifier:DynamicRO "ipv4_gateway" "IPv4 gateway (the empty string means that no gateway is set)" ~default_value:(Some (VString "")) - ; field ~ty:ipv6_configuration_mode ~in_product_since:rel_dundee + ; field ~ty:ipv6_configuration_mode + ~lifecycle: + [ + ( Published + , rel_dundee + , "Determines whether IPv6 addresses are configured on the \ + VIF" + ) + ] ~qualifier:DynamicRO "ipv6_configuration_mode" "Determines whether IPv6 addresses are configured on the VIF" ~default_value:(Some (VEnum "None")) - ; field ~ty:(Set String) ~in_product_since:rel_dundee + ; field ~ty:(Set String) + ~lifecycle: + [(Published, rel_dundee, "IPv6 addresses in CIDR format")] ~qualifier:DynamicRO "ipv6_addresses" "IPv6 addresses in CIDR format" ~default_value:(Some (VSet [])) - ; field ~ty:String ~in_product_since:rel_dundee ~qualifier:DynamicRO - "ipv6_gateway" + ; field ~ty:String + ~lifecycle: + [ + ( Published + , rel_dundee + , "IPv6 gateway (the empty string means that no gateway is \ + set)" + ) + ] + ~qualifier:DynamicRO "ipv6_gateway" "IPv6 gateway (the empty string means that no gateway is set)" ~default_value:(Some (VString "")) - ; field ~ty:(Ref _pci) ~in_product_since:rel_kolkata + ; field ~ty:(Ref _pci) + ~lifecycle: + [ + ( Published + , rel_kolkata + , "pci of network SR-IOV VF which is reserved for this vif" + ) + ] ~internal_only:true ~qualifier:DynamicRO "reserved_pci" "pci of network SR-IOV VF which is reserved for this vif" ~default_value:(Some (VRef null_ref)) @@ -3234,11 +3984,23 @@ module VIF_metrics = struct ~messages:[] ~contents: [ - uid _vif_metrics + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vif_metrics ; namespace ~name:"io" ~contents:iobandwidth () - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time at which this information was last updated" + ) + ] "last_updated" "Time at which this information was last updated" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -3261,20 +4023,37 @@ module Data_source = struct ~lifecycle:[(Published, rel_rio, "")] ) () - ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Bool + ~lifecycle: + [(Published, rel_rio, "true if the data source is being logged")] "enabled" "true if the data source is being logged" - ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Bool + ~lifecycle: + [ + ( Published + , rel_rio + , "true if the data source is enabled by default. Non-default \ + data sources cannot be disabled" + ) + ] "standard" "true if the data source is enabled by default. 
Non-default data \ sources cannot be disabled" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "the units of the value")] "units" "the units of the value" - ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio "min" - "the minimum value of the data source" - ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio "max" - "the maximum value of the data source" - ; field ~qualifier:DynamicRO ~ty:Float ~in_product_since:rel_rio "value" - "current value of the data source" + ; field ~qualifier:DynamicRO ~ty:Float + ~lifecycle: + [(Published, rel_rio, "the minimum value of the data source")] + "min" "the minimum value of the data source" + ; field ~qualifier:DynamicRO ~ty:Float + ~lifecycle: + [(Published, rel_rio, "the maximum value of the data source")] + "max" "the maximum value of the data source" + ; field ~qualifier:DynamicRO ~ty:Float + ~lifecycle: + [(Published, rel_rio, "current value of the data source")] + "value" "current value of the data source" ] () end @@ -3969,7 +4748,10 @@ module SR = struct ] ~contents: ([ - uid _sr + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _sr ; namespace ~name:"name" ~contents: (names oss_since_303 StaticRO @@ -3980,28 +4762,83 @@ module SR = struct @ allowed_and_current_operations operations @ [ field ~ty:(Set (Ref _vdi)) ~qualifier:DynamicRO - ~in_product_since:rel_rio "VDIs" - "all virtual disks known to this storage repository" + ~lifecycle: + [ + ( Published + , rel_rio + , "all virtual disks known to this storage repository" + ) + ] + "VDIs" "all virtual disks known to this storage repository" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) - ~in_product_since:rel_rio "PBDs" + ~lifecycle: + [ + ( Published + , rel_rio + , "describes how particular hosts can see this storage \ + repository" + ) + ] + "PBDs" "describes how particular hosts can see this storage repository" - ; field ~ty:Int ~qualifier:DynamicRO ~in_product_since:rel_rio + ; field ~ty:Int ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_rio + , "sum of virtual_sizes of all VDIs in this storage \ + repository (in bytes)" + ) + ] "virtual_allocation" "sum of virtual_sizes of all VDIs in this storage repository (in \ bytes)" - ; field ~ty:Int ~qualifier:DynamicRO ~in_product_since:rel_rio + ; field ~ty:Int ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_rio + , "physical space currently utilised on this storage \ + repository (in bytes). Note that for sparse disk formats, \ + physical_utilisation may be less than virtual_allocation" + ) + ] "physical_utilisation" "physical space currently utilised on this storage repository \ (in bytes). 
Note that for sparse disk formats, \ physical_utilisation may be less than virtual_allocation" - ; field ~ty:Int ~qualifier:StaticRO ~in_product_since:rel_rio + ; field ~ty:Int ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_rio + , "total physical size of the repository (in bytes)" + ) + ] "physical_size" "total physical size of the repository (in bytes)" - ; field ~qualifier:StaticRO ~in_product_since:rel_rio "type" - "type of the storage repository" - ; field ~qualifier:StaticRO ~in_product_since:rel_rio "content_type" + ; field ~qualifier:StaticRO + ~lifecycle: + [(Published, rel_rio, "type of the storage repository")] + "type" "type of the storage repository" + ; field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_rio + , "the type of the SR's content, if required (e.g. ISOs)" + ) + ] + "content_type" "the type of the SR's content, if required (e.g. ISOs)" ; field ~qualifier:DynamicRO "shared" ~ty:Bool - ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "true if this SR is (capable of being) shared between \ + multiple hosts" + ) + ] "true if this SR is (capable of being) shared between multiple \ hosts" ; field @@ -4009,25 +4846,55 @@ module SR = struct "other_config" "additional configuration" ~map_keys_roles: [("folder", _R_VM_OP); ("XenCenter.CustomFields.*", _R_VM_OP)] - ~in_product_since:rel_rio - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando + ~lifecycle:[(Published, rel_rio, "additional configuration")] + ; field ~writer_roles:_R_VM_OP + ~lifecycle: + [ + ( Published + , rel_orlando + , "user-specified tags for categorization purposes" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" ; field ~ty:Bool ~qualifier:DynamicRO ~in_oss_since:None - ~in_product_since:rel_rio ~internal_only:true - "default_vdi_visibility" "" + ~lifecycle:[(Published, rel_rio, "")] + ~internal_only:true "default_vdi_visibility" "" ; field ~in_oss_since:None ~ty:(Map (String, String)) - ~in_product_since:rel_miami ~qualifier:RW "sm_config" - "SM dependent data" ~default_value:(Some (VMap [])) - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ~lifecycle:[(Published, rel_miami, "SM dependent data")] + ~qualifier:RW "sm_config" "SM dependent data" + ~default_value:(Some (VMap [])) + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_orlando + , "Binary blobs associated with this SR" + ) + ] ~ty:(Map (String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this SR" - ; field ~qualifier:DynamicRO ~in_product_since:rel_cowley ~ty:Bool - ~default_value:(Some (VBool false)) "local_cache_enabled" + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_cowley + , "True if this SR is assigned to be the local cache for its \ + host" + ) + ] + ~ty:Bool ~default_value:(Some (VBool false)) "local_cache_enabled" "True if this SR is assigned to be the local cache for its host" - ; field ~qualifier:DynamicRO ~in_product_since:rel_boston + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_boston + , "The disaster recovery task which introduced this SR" + ) + ] ~ty:(Ref _dr_task) ~default_value:(Some (VRef null_ref)) "introduced_by" "The disaster recovery task which introduced this SR" @@ -4063,24 +4930,51 @@ module SM = struct ~messages_default_allowed_roles:_R_POOL_OP ~messages:[] ~contents: [ - uid _sm + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _sm ; 
namespace ~name:"name" ~contents: (names None DynamicRO ~lifecycle:[(Published, rel_rio, "")]) () - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "SR.type")] ~qualifier:DynamicRO "type" "SR.type" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Vendor who created this plugin")] ~qualifier:DynamicRO "vendor" "Vendor who created this plugin" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Entity which owns the copyright of this plugin" + ) + ] ~qualifier:DynamicRO "copyright" "Entity which owns the copyright of this plugin" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Version of the plugin")] ~qualifier:DynamicRO "version" "Version of the plugin" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "Minimum SM API version required on the server" + ) + ] ~qualifier:DynamicRO "required_api_version" "Minimum SM API version required on the server" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "names and descriptions of device config keys" + ) + ] ~qualifier:DynamicRO ~ty:(Map (String, String)) "configuration" "names and descriptions of device config keys" @@ -4093,20 +4987,39 @@ module SM = struct ~ty:(Set String) "capabilities" "capabilities of the SM plugin" ~default_value:(Some (VSet [])) ; field ~in_oss_since:None ~qualifier:DynamicRO - ~in_product_since:rel_clearwater + ~lifecycle: + [ + ( Published + , rel_clearwater + , "capabilities of the SM plugin, with capability version \ + numbers" + ) + ] ~ty:(Map (String, Int)) "features" "capabilities of the SM plugin, with capability version numbers" ~default_value:(Some (VMap [])) - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" - ; field ~in_product_since:rel_orlando ~qualifier:DynamicRO - ~default_value:(Some (VString "")) ~ty:String "driver_filename" - "filename of the storage driver" - ; field ~in_product_since:rel_dundee ~qualifier:DynamicRO - ~default_value:(Some (VSet [])) ~ty:(Set String) - "required_cluster_stack" + ; field + ~lifecycle: + [(Published, rel_orlando, "filename of the storage driver")] + ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String + "driver_filename" "filename of the storage driver" + ; field + ~lifecycle: + [ + ( Published + , rel_dundee + , "The storage plugin requires that one of these cluster \ + stacks is configured and running." + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VSet [])) + ~ty:(Set String) "required_cluster_stack" "The storage plugin requires that one of these cluster stacks is \ configured and running." 
] @@ -4162,7 +5075,13 @@ module LVHD = struct ~descr:"LVHD SR specific operations" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[enable_thin_provisioning] - ~contents:[uid _lvhd] + ~contents: + [ + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _lvhd + ] () end @@ -5215,7 +6134,10 @@ module VDI = struct ] ~contents: ([ - uid _vdi + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vdi ; namespace ~name:"name" ~contents: (names oss_since_303 StaticRO @@ -5225,46 +6147,103 @@ module VDI = struct ] @ allowed_and_current_operations operations @ [ - field ~qualifier:StaticRO ~ty:(Ref _sr) ~in_product_since:rel_rio + field ~qualifier:StaticRO ~ty:(Ref _sr) + ~lifecycle: + [ + ( Published + , rel_rio + , "storage repository in which the VDI resides" + ) + ] "SR" "storage repository in which the VDI resides" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) - ~in_product_since:rel_rio "VBDs" - "list of vbds that refer to this disk" + ~lifecycle: + [(Published, rel_rio, "list of vbds that refer to this disk")] + "VBDs" "list of vbds that refer to this disk" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) - ~in_product_since:rel_rio "crash_dumps" - "list of crash dumps that refer to this disk" - ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "list of crash dumps that refer to this disk" + ) + ] + "crash_dumps" "list of crash dumps that refer to this disk" + ; field ~qualifier:StaticRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "size of disk as presented to the guest (in bytes). Note \ + that, depending on storage backend type, requested size \ + may not be respected exactly" + ) + ] "virtual_size" "size of disk as presented to the guest (in bytes). 
Note that, \ depending on storage backend type, requested size may not be \ respected exactly" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "amount of physical space that the disk image is currently \ + taking up on the storage repository (in bytes)" + ) + ] "physical_utilisation" "amount of physical space that the disk image is currently \ taking up on the storage repository (in bytes)" - ; field ~qualifier:StaticRO ~ty:type' ~in_product_since:rel_rio "type" - "type of the VDI" - ; field ~qualifier:StaticRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:type' + ~lifecycle:[(Published, rel_rio, "type of the VDI")] + "type" "type of the VDI" + ; field ~qualifier:StaticRO ~ty:Bool + ~lifecycle: + [(Published, rel_rio, "true if this disk may be shared")] "sharable" "true if this disk may be shared" - ; field ~qualifier:StaticRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:Bool + ~lifecycle: + [ + ( Published + , rel_rio + , "true if this disk may ONLY be mounted read-only" + ) + ] "read_only" "true if this disk may ONLY be mounted read-only" ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other_config" - "additional configuration" + ~lifecycle:[(Published, rel_rio, "additional configuration")] + "other_config" "additional configuration" ~map_keys_roles: [("folder", _R_VM_OP); ("XenCenter.CustomFields.*", _R_VM_OP)] ; field ~qualifier:DynamicRO ~ty:Bool "storage_lock" - ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "true if this disk is locked at the storage level" + ) + ] "true if this disk is locked at the storage level" ; (* XXX: location field was in the database in rio, now API in miami *) - field ~in_oss_since:None ~in_product_since:rel_miami ~ty:String - ~qualifier:DynamicRO ~default_value:(Some (VString "")) "location" - "location information" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:Bool - ~qualifier:DynamicRO "managed" "" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:Bool - ~qualifier:DynamicRO "missing" + field ~in_oss_since:None + ~lifecycle:[(Published, rel_miami, "location information")] + ~ty:String ~qualifier:DynamicRO ~default_value:(Some (VString "")) + "location" "location information" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "")] + ~ty:Bool ~qualifier:DynamicRO "managed" "" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "true if SR scan operation reported this VDI as not \ + present on disk" + ) + ] + ~ty:Bool ~qualifier:DynamicRO "missing" "true if SR scan operation reported this VDI as not present on \ disk" ; field ~in_oss_since:None ~ty:(Ref _vdi) ~qualifier:DynamicRO @@ -5276,7 +6255,17 @@ module VDI = struct "parent" "This field is always null. Deprecated" ; field ~in_oss_since:None ~ty:(Map (String, String)) - ~in_product_since:rel_miami ~qualifier:RW "xenstore_data" + ~lifecycle: + [ + ( Published + , rel_miami + , "data to be inserted into the xenstore tree \ + (/local/domain/0/backend/vbd///sm-data) \ + after the VDI is attached. This is generally set by the \ + SM backends on vdi_attach." + ) + ] + ~qualifier:RW "xenstore_data" "data to be inserted into the xenstore tree \ (/local/domain/0/backend/vbd///sm-data) after \ the VDI is attached. 
This is generally set by the SM backends \ @@ -5284,37 +6273,101 @@ module VDI = struct ~default_value:(Some (VMap [])) ; field ~in_oss_since:None ~ty:(Map (String, String)) - ~in_product_since:rel_miami ~qualifier:RW "sm_config" - "SM dependent data" ~default_value:(Some (VMap [])) - ; field ~in_product_since:rel_orlando + ~lifecycle:[(Published, rel_miami, "SM dependent data")] + ~qualifier:RW "sm_config" "SM dependent data" + ~default_value:(Some (VMap [])) + ; field + ~lifecycle: + [(Published, rel_orlando, "true if this is a snapshot.")] ~default_value:(Some (VBool false)) ~qualifier:DynamicRO ~ty:Bool ~doc_tags:[Snapshots] "is_a_snapshot" "true if this is a snapshot." - ; field ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) - ~qualifier:DynamicRO ~ty:(Ref _vdi) ~doc_tags:[Snapshots] - "snapshot_of" "Ref pointing to the VDI this snapshot is of." - ; field ~in_product_since:rel_orlando ~qualifier:DynamicRO - ~ty:(Set (Ref _vdi)) ~doc_tags:[Snapshots] "snapshots" - "List pointing to all the VDIs snapshots." - ; field ~in_product_since:rel_orlando + ; field + ~lifecycle: + [ + ( Published + , rel_orlando + , "Ref pointing to the VDI this snapshot is of." + ) + ] + ~default_value:(Some (VRef "")) ~qualifier:DynamicRO + ~ty:(Ref _vdi) ~doc_tags:[Snapshots] "snapshot_of" + "Ref pointing to the VDI this snapshot is of." + ; field + ~lifecycle: + [ + ( Published + , rel_orlando + , "List pointing to all the VDIs snapshots." + ) + ] + ~qualifier:DynamicRO ~ty:(Set (Ref _vdi)) ~doc_tags:[Snapshots] + "snapshots" "List pointing to all the VDIs snapshots." + ; field + ~lifecycle: + [ + ( Published + , rel_orlando + , "Date/time when this snapshot was created." + ) + ] ~default_value:(Some (VDateTime Date.epoch)) ~qualifier:DynamicRO ~ty:DateTime ~doc_tags:[Snapshots] "snapshot_time" "Date/time when this snapshot was created." 
- ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando + ; field ~writer_roles:_R_VM_OP + ~lifecycle: + [ + ( Published + , rel_orlando + , "user-specified tags for categorization purposes" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" - ; field ~in_product_since:rel_cowley ~qualifier:DynamicRO ~ty:Bool - ~default_value:(Some (VBool false)) "allow_caching" + ; field + ~lifecycle: + [ + ( Published + , rel_cowley + , "true if this VDI is to be cached in the local cache SR" + ) + ] + ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) + "allow_caching" "true if this VDI is to be cached in the local cache SR" - ; field ~in_product_since:rel_cowley ~qualifier:DynamicRO ~ty:on_boot + ; field + ~lifecycle: + [ + ( Published + , rel_cowley + , "The behaviour of this VDI on a VM boot" + ) + ] + ~qualifier:DynamicRO ~ty:on_boot ~default_value:(Some (VEnum "persist")) "on_boot" "The behaviour of this VDI on a VM boot" - ; field ~in_product_since:rel_boston ~qualifier:DynamicRO - ~ty:(Ref _pool) ~default_value:(Some (VRef null_ref)) - "metadata_of_pool" + ; field + ~lifecycle: + [ + ( Published + , rel_boston + , "The pool whose metadata is contained in this VDI" + ) + ] + ~qualifier:DynamicRO ~ty:(Ref _pool) + ~default_value:(Some (VRef null_ref)) "metadata_of_pool" "The pool whose metadata is contained in this VDI" - ; field ~in_product_since:rel_boston ~qualifier:DynamicRO ~ty:Bool - ~default_value:(Some (VBool false)) "metadata_latest" + ; field + ~lifecycle: + [ + ( Published + , rel_boston + , "Whether this VDI contains the latest known accessible \ + metadata for the pool" + ) + ] + ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) + "metadata_latest" "Whether this VDI contains the latest known accessible metadata \ for the pool" ; field @@ -5576,12 +6629,19 @@ module VBD = struct ; set_mode ] ~contents: - ([uid _vbd] + ([ + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vbd + ] @ allowed_and_current_operations operations @ [ - field ~qualifier:StaticRO ~ty:(Ref _vm) ~in_product_since:rel_rio + field ~qualifier:StaticRO ~ty:(Ref _vm) + ~lifecycle:[(Published, rel_rio, "the virtual machine")] "VM" "the virtual machine" - ; field ~qualifier:StaticRO ~ty:(Ref _vdi) ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:(Ref _vdi) + ~lifecycle:[(Published, rel_rio, "the virtual disk")] "VDI" "the virtual disk" ; field ~qualifier:StaticRO ~ty:String ~default_value:(Some (VString "")) @@ -5595,29 +6655,71 @@ module VBD = struct ) ] "device" "device seen by the guest e.g. hda1" - ; field ~in_product_since:rel_rio "userdevice" - "user-friendly device name e.g. 0,1,2,etc." - ; field ~ty:Bool ~in_product_since:rel_rio "bootable" - "true if this VBD is bootable" - ; field ~qualifier:StaticRO ~ty:mode ~in_product_since:rel_rio "mode" - "the mode the VBD should be mounted with" - ; field ~ty:type' ~in_product_since:rel_rio "type" - "how the VBD will appear to the guest (e.g. disk or CD)" - ; field ~in_oss_since:None ~in_product_since:rel_miami ~ty:Bool - ~default_value:(Some (VBool true)) "unpluggable" + ; field + ~lifecycle: + [ + ( Published + , rel_rio + , "user-friendly device name e.g. 0,1,2,etc." + ) + ] + "userdevice" "user-friendly device name e.g. 0,1,2,etc." 
+ ; field ~ty:Bool + ~lifecycle:[(Published, rel_rio, "true if this VBD is bootable")] + "bootable" "true if this VBD is bootable" + ; field ~qualifier:StaticRO ~ty:mode + ~lifecycle: + [ + (Published, rel_rio, "the mode the VBD should be mounted with") + ] + "mode" "the mode the VBD should be mounted with" + ; field ~ty:type' + ~lifecycle: + [ + ( Published + , rel_rio + , "how the VBD will appear to the guest (e.g. disk or CD)" + ) + ] + "type" "how the VBD will appear to the guest (e.g. disk or CD)" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_miami + , "true if this VBD will support hot-unplug" + ) + ] + ~ty:Bool ~default_value:(Some (VBool true)) "unpluggable" "true if this VBD will support hot-unplug" - ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Bool + ~lifecycle: + [ + ( Published + , rel_rio + , "true if a storage level lock was acquired" + ) + ] "storage_lock" "true if a storage level lock was acquired" - ; field ~qualifier:StaticRO ~ty:Bool ~in_product_since:rel_rio "empty" - "if true this represents an empty drive" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:Bool + ~lifecycle: + [(Published, rel_rio, "if true this represents an empty drive")] + "empty" "if true this represents an empty drive" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "true if the VBD is reserved pending a reboot/migrate" + ) + ] ~internal_only:true ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "reserved" "true if the VBD is reserved pending a reboot/migrate" ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other_config" - "additional configuration" + ~lifecycle:[(Published, rel_rio, "additional configuration")] + "other_config" "additional configuration" ] @ device_status_fields @ [namespace ~name:"qos" ~contents:(qos "VBD") ()] @@ -5655,7 +6757,10 @@ module VBD_metrics = struct ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[] ~contents: [ - uid _vbd_metrics + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vbd_metrics ; namespace ~name:"io" ~contents:iobandwidth () ; field ~qualifier:DynamicRO ~ty:DateTime ~default_value:(Some (VDateTime Date.epoch)) @@ -5701,12 +6806,19 @@ module Crashdump = struct ~messages_default_allowed_roles:_R_POOL_OP ~messages:[destroy] ~contents: [ - uid _crashdump - ; field ~qualifier:StaticRO ~ty:(Ref _vm) ~in_product_since:rel_rio "VM" - "the virtual machine" - ; field ~qualifier:StaticRO ~ty:(Ref _vdi) ~in_product_since:rel_rio + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _crashdump + ; field ~qualifier:StaticRO ~ty:(Ref _vm) + ~lifecycle:[(Published, rel_rio, "the virtual machine")] + "VM" "the virtual machine" + ; field ~qualifier:StaticRO ~ty:(Ref _vdi) + ~lifecycle:[(Published, rel_rio, "the virtual disk")] "VDI" "the virtual disk" - ; field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_miami, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ] @@ -5897,16 +7009,36 @@ module Subject = struct ~messages:[add_to_roles; remove_from_roles; get_permissions_name_label] ~contents: [ - uid ~in_oss_since:None _subject - ; field ~in_product_since:rel_george ~default_value:(Some (VString "")) - ~qualifier:StaticRO ~ty:String "subject_identifier" + uid + ~lifecycle: + [(Published, 
rel_rio, "Unique identifier/object reference")] + ~in_oss_since:None _subject + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "the subject identifier, unique in the external directory \ + service" + ) + ] + ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String + "subject_identifier" "the subject identifier, unique in the external directory service" - ; field ~in_product_since:rel_george ~default_value:(Some (VMap [])) - ~qualifier:StaticRO + ; field + ~lifecycle:[(Published, rel_george, "additional configuration")] + ~default_value:(Some (VMap [])) ~qualifier:StaticRO ~ty:(Map (String, String)) "other_config" "additional configuration" ; (* DynamicRO fields do not show up in the constructor, as it should be because a subject must be created without receiving any roles as a parameter *) - field ~in_product_since:rel_midnight_ride + field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "the roles associated with this subject" + ) + ] ~default_value: (Some (VSet [VRef ("OpaqueRef:" ^ Constants.rbac_pool_admin_uuid)]) ) @@ -6006,24 +7138,52 @@ module Role = struct ] ~contents: [ - uid ~in_oss_since:None _role + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ~in_oss_since:None _role ; namespace ~name:"name" ~contents: [ - field ~in_product_since:rel_midnight_ride + field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "a short user-friendly name for the role" + ) + ] ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "label" "a short user-friendly name for the role" - ; field ~in_product_since:rel_midnight_ride + ; field + ~lifecycle: + [(Published, rel_midnight_ride, "what this role is for")] ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "description" "what this role is for" ] () - ; field ~in_product_since:rel_midnight_ride + ; field + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "a list of pointers to other roles or permissions" + ) + ] ~default_value:(Some (VSet [])) ~ignore_foreign_key:true ~qualifier:StaticRO ~ty:(Set (Ref _role)) "subroles" "a list of pointers to other roles or permissions" - ; field ~in_product_since:"22.5.0" ~default_value:(Some (VBool false)) - ~qualifier:DynamicRO ~ty:Bool "is_internal" + ; field + ~lifecycle: + [ + ( Published + , "22.5.0" + , "Indicates whether the role is only to be assigned \ + internally by xapi, or can be used by clients" + ) + ] + ~default_value:(Some (VBool false)) ~qualifier:DynamicRO ~ty:Bool + "is_internal" "Indicates whether the role is only to be assigned internally by \ xapi, or can be used by clients" (*RBAC2: field ~in_product_since:rel_midnight_ride ~default_value:(Some (VBool false)) ~qualifier:StaticRO ~ty:Bool "is_complete" "if this is a complete role, meant to be used by the end-user";*) @@ -6053,18 +7213,34 @@ module Console = struct ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[] ~contents: [ - uid _console - ; field ~qualifier:DynamicRO ~ty:protocol ~in_product_since:rel_rio + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _console + ; field ~qualifier:DynamicRO ~ty:protocol + ~lifecycle: + [(Published, rel_rio, "the protocol used by this console")] "protocol" "the protocol used by this console" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "URI for the console service")] "location" "URI for the console service" - ; field ~qualifier:DynamicRO ~ty:(Ref _vm) 
~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:(Ref _vm) + ~lifecycle: + [(Published, rel_rio, "VM to which this console is attached")] "VM" "VM to which this console is attached" ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other_config" "additional configuration" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~internal_only:true - ~ty:Int "port" + ~lifecycle:[(Published, rel_rio, "additional configuration")] + "other_config" "additional configuration" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "port in dom0 on which the console server is listening" + ) + ] + ~internal_only:true ~ty:Int "port" "port in dom0 on which the console server is listening" ] () @@ -6073,14 +7249,16 @@ end module VM_metrics = struct let vm_memory_metrics = [ - field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "actual" - "Guest's actual memory (bytes)" ~persist:false + field ~qualifier:DynamicRO ~ty:Int + ~lifecycle:[(Published, rel_rio, "Guest's actual memory (bytes)")] + "actual" "Guest's actual memory (bytes)" ~persist:false ] let vm_vcpu_metrics = [ - field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "number" - "Current number of VCPUs" ~persist:true + field ~qualifier:DynamicRO ~ty:Int + ~lifecycle:[(Published, rel_rio, "Current number of VCPUs")] + "number" "Current number of VCPUs" ~persist:true ; field ~qualifier:DynamicRO ~ty:(Map (Int, Float)) ~persist:false "utilisation" @@ -6094,15 +7272,17 @@ module VM_metrics = struct ] ; field ~qualifier:DynamicRO ~ty:(Map (Int, Int)) - ~in_product_since:rel_rio "CPU" "VCPU to PCPU map" ~persist:false + ~lifecycle:[(Published, rel_rio, "VCPU to PCPU map")] + "CPU" "VCPU to PCPU map" ~persist:false ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "params" - "The live equivalent to VM.VCPUs_params" ~persist:false + ~lifecycle: + [(Published, rel_rio, "The live equivalent to VM.VCPUs_params")] + "params" "The live equivalent to VM.VCPUs_params" ~persist:false ; field ~qualifier:DynamicRO ~ty:(Map (Int, Set String)) - ~in_product_since:rel_rio "flags" "CPU flags (blocked,online,running)" - ~persist:false + ~lifecycle:[(Published, rel_rio, "CPU flags (blocked,online,running)")] + "flags" "CPU flags (blocked,online,running)" ~persist:false ] let t = @@ -6114,32 +7294,65 @@ module VM_metrics = struct ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[] ~contents: [ - uid _vm_metrics + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vm_metrics ; namespace ~name:"memory" ~contents:vm_memory_metrics () ; namespace ~name:"VCPUs" ~contents:vm_vcpu_metrics () - ; field ~qualifier:DynamicRO ~ty:(Set String) ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:(Set String) + ~lifecycle: + [ + ( Published + , rel_rio + , "The state of the guest, eg blocked, dying etc" + ) + ] "state" "The state of the guest, eg blocked, dying etc" ~persist:false - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [(Published, rel_rio, "Time at which this VM was last booted")] "start_time" "Time at which this VM was last booted" ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime - ~in_product_since:rel_rio "install_time" - "Time at which the VM was installed" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ~lifecycle: + [(Published, rel_rio, "Time at which the VM was installed")] + "install_time" "Time at which 
the VM was installed" + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time at which this information was last updated" + ) + ] "last_updated" "Time at which this information was last updated" ~persist:false - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" ~persist:false - ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) - ~ty:Bool ~qualifier:DynamicRO "hvm" "hardware virtual machine" + ; field + ~lifecycle:[(Published, rel_ely, "hardware virtual machine")] + ~default_value:(Some (VBool false)) ~ty:Bool ~qualifier:DynamicRO + "hvm" "hardware virtual machine" ~persist:false + ; field + ~lifecycle: + [(Published, rel_ely, "VM supports nested virtualisation")] + ~default_value:(Some (VBool false)) ~ty:Bool ~qualifier:DynamicRO + "nested_virt" "VM supports nested virtualisation" ~persist:false + ; field + ~lifecycle: + [ + ( Published + , rel_ely + , "VM is immobile and can't migrate between hosts" + ) + ] + ~default_value:(Some (VBool false)) ~ty:Bool ~qualifier:DynamicRO + "nomigrate" "VM is immobile and can't migrate between hosts" ~persist:false - ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) - ~ty:Bool ~qualifier:DynamicRO "nested_virt" - "VM supports nested virtualisation" ~persist:false - ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) - ~ty:Bool ~qualifier:DynamicRO "nomigrate" - "VM is immobile and can't migrate between hosts" ~persist:false ; field ~lifecycle: [ @@ -6187,18 +7400,22 @@ module VM_guest_metrics = struct ~messages_default_allowed_roles:_R_VM_ADMIN ~messages:[] ~contents: [ - uid _vm_guest_metrics + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vm_guest_metrics ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "os_version" "version of the OS" + ~lifecycle:[(Published, rel_rio, "version of the OS")] + "os_version" "version of the OS" ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) ~lifecycle:[] "netbios_name" "The NETBIOS name of the machine" ~default_value:(Some (VMap [])) ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "PV_drivers_version" - "version of the PV drivers" + ~lifecycle:[(Published, rel_rio, "version of the PV drivers")] + "PV_drivers_version" "version of the PV drivers" ; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None ~lifecycle: [ @@ -6241,16 +7458,35 @@ module VM_guest_metrics = struct "disks" "This field exists but has no data." 
; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "networks" "network configuration" + ~lifecycle:[(Published, rel_rio, "network configuration")] + "networks" "network configuration" ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other" "anything else" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ~lifecycle:[(Published, rel_rio, "anything else")] + "other" "anything else" + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time at which this information was last updated" + ) + ] "last_updated" "Time at which this information was last updated" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ; field + ~lifecycle:[(Published, rel_orlando, "additional configuration")] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "other_config" "additional configuration" - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_orlando + , "True if the guest is sending heartbeat messages via the \ + guest agent" + ) + ] ~default_value:(Some (VBool false)) ~ty:Bool "live" "True if the guest is sending heartbeat messages via the guest \ agent" @@ -6875,33 +8111,69 @@ module VMSS = struct ] ~contents: [ - uid _vmss + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vmss ; namespace ~name:"name" ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) () - ; field ~qualifier:RW ~ty:Bool ~in_product_since:rel_rio "enabled" - "enable or disable this snapshot schedule" + ; field ~qualifier:RW ~ty:Bool + ~lifecycle: + [(Published, rel_rio, "enable or disable this snapshot schedule")] + "enabled" "enable or disable this snapshot schedule" ~default_value:(Some (VBool true)) - ; field ~qualifier:StaticRO ~ty:type' ~in_product_since:rel_rio "type" - "type of the snapshot schedule" - ; field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:type' + ~lifecycle:[(Published, rel_rio, "type of the snapshot schedule")] + "type" "type of the snapshot schedule" + ; field ~qualifier:StaticRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "maximum number of snapshots that should be stored at any \ + time" + ) + ] "retained_snapshots" "maximum number of snapshots that should be stored at any time" ~default_value:(Some (VInt 7L)) - ; field ~qualifier:StaticRO ~ty:frequency ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:frequency + ~lifecycle: + [ + ( Published + , rel_rio + , "frequency of taking snapshot from snapshot schedule" + ) + ] "frequency" "frequency of taking snapshot from snapshot schedule" ; field ~qualifier:StaticRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "schedule" + ~lifecycle: + [ + ( Published + , rel_rio + , "schedule of the snapshot containing 'hour', 'min', 'days'. \ + Date/time-related information is in Local Timezone" + ) + ] + "schedule" "schedule of the snapshot containing 'hour', 'min', 'days'. 
\ Date/time-related information is in Local Timezone" ~default_value:(Some (VMap [])) - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle:[(Published, rel_rio, "time of the last snapshot")] "last_run_time" "time of the last snapshot" ~default_value:(Some (VDateTime Date.epoch)) ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) - ~in_product_since:rel_rio "VMs" - "all VMs attached to this snapshot schedule" + ~lifecycle: + [ + ( Published + , rel_rio + , "all VMs attached to this snapshot schedule" + ) + ] + "VMs" "all VMs attached to this snapshot schedule" ] () end @@ -7066,7 +8338,10 @@ module VM_appliance = struct ] ~contents: ([ - uid _vm_appliance + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _vm_appliance ; namespace ~name:"name" ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) () @@ -7074,7 +8349,8 @@ module VM_appliance = struct @ allowed_and_current_operations operations @ [ field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) - ~in_product_since:rel_rio "VMs" "all VMs in this appliance" + ~lifecycle:[(Published, rel_rio, "all VMs in this appliance")] + "VMs" "all VMs in this appliance" ] ) () @@ -7132,10 +8408,14 @@ module DR_task = struct ~messages_default_allowed_roles:_R_POOL_OP ~messages:[create; destroy] ~contents: [ - uid _dr_task + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _dr_task ; field ~qualifier:DynamicRO ~ty:(Set (Ref _sr)) - ~in_product_since:rel_rio "introduced_SRs" - "All SRs introduced by this appliance" + ~lifecycle: + [(Published, rel_rio, "All SRs introduced by this appliance")] + "introduced_SRs" "All SRs introduced by this appliance" ] () end @@ -7332,22 +8612,46 @@ module Event = struct ; contents= [ field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:Int - ~in_product_since:rel_rio "id" + ~lifecycle: + [ + ( Published + , rel_rio + , "An ID, monotonically increasing, and local to the current \ + session" + ) + ] + "id" "An ID, monotonically increasing, and local to the current session" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:DateTime - ~in_product_since:rel_rio ~internal_deprecated_since:rel_boston + ~lifecycle: + [ + (Published, rel_rio, "The time at which the event occurred") + ; (Deprecated, rel_boston, "") + ] "timestamp" "The time at which the event occurred" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String - ~in_product_since:rel_rio "class" - "The name of the class of the object that changed" + ~lifecycle: + [ + ( Published + , rel_rio + , "The name of the class of the object that changed" + ) + ] + "class" "The name of the class of the object that changed" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:operation - ~in_product_since:rel_rio "operation" - "The operation that was performed" + ~lifecycle: + [(Published, rel_rio, "The operation that was performed")] + "operation" "The operation that was performed" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String - ~in_product_since:rel_rio "ref" - "A reference to the object that changed" + ~lifecycle: + [(Published, rel_rio, "A reference to the object that changed")] + "ref" "A reference to the object that changed" ; field ~reader_roles:_R_ALL ~qualifier:StaticRO ~ty:String - ~in_product_since:rel_rio ~internal_deprecated_since:rel_boston + ~lifecycle: + [ + (Published, rel_rio, "The uuid of the object that changed") + ; (Deprecated, rel_boston, "") + ] "obj_uuid" "The uuid of the object that changed" ] 
; (* As of tampa, the event record has one more field, snapshot, which is the record of the object changed. @@ -7408,19 +8712,43 @@ module Blob = struct ~messages_default_allowed_roles:_R_POOL_OP ~messages:[create; destroy] ~contents: [ - uid _blob + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _blob ; namespace ~name:"name" ~contents: (names oss_since_303 RW ~lifecycle:[(Published, rel_rio, "")]) () - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio "size" - "Size of the binary data, in bytes" + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [(Published, rel_rio, "Size of the binary data, in bytes")] + "size" "Size of the binary data, in bytes" ; field ~writer_roles:_R_POOL_OP ~qualifier:RW - ~in_product_since:rel_tampa ~default_value:(Some (VBool false)) - ~ty:Bool "public" "True if the blob is publicly accessible" - ; field ~qualifier:StaticRO ~ty:DateTime ~in_product_since:rel_rio + ~lifecycle: + [ + (Published, rel_tampa, "True if the blob is publicly accessible") + ] + ~default_value:(Some (VBool false)) ~ty:Bool "public" + "True if the blob is publicly accessible" + ; field ~qualifier:StaticRO ~ty:DateTime + ~lifecycle: + [ + ( Published + , rel_rio + , "Time at which the data in the blob was last updated" + ) + ] "last_updated" "Time at which the data in the blob was last updated" - ; field ~qualifier:StaticRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "The mime type associated with this object. Defaults to \ + 'application/octet-stream' if the empty string is supplied" + ) + ] "mime_type" "The mime type associated with this object. Defaults to \ 'application/octet-stream' if the empty string is supplied" @@ -7572,10 +8900,21 @@ module Message = struct ] ~contents: [ - uid _message - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio "name" - "The name of the message" - ; field ~qualifier:DynamicRO ~ty:Int ~in_product_since:rel_rio + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _message + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "The name of the message")] + "name" "The name of the message" + ; field ~qualifier:DynamicRO ~ty:Int + ~lifecycle: + [ + ( Published + , rel_rio + , "The message priority, 0 being low priority" + ) + ] "priority" "The message priority, 0 being low priority" ; field ~qualifier:DynamicRO ~ty:cls ~lifecycle: @@ -7584,12 +8923,24 @@ module Message = struct ; (Extended, "1.313.0", "Added Certificate class") ] "cls" "The class of the object this message is associated with" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "The uuid of the object this message is associated with" + ) + ] "obj_uuid" "The uuid of the object this message is associated with" - ; field ~qualifier:DynamicRO ~ty:DateTime ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:DateTime + ~lifecycle: + [ + (Published, rel_rio, "The time at which the message was created") + ] "timestamp" "The time at which the message was created" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio "body" - "The body of the message" + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle:[(Published, rel_rio, "The body of the message")] + "body" "The body of the message" ] () end @@ -7635,13 +8986,17 @@ module Secret = struct ~persist:PersistEverything 
~contents: [ - uid ~reader_roles:_R_POOL_OP _secret + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ~reader_roles:_R_POOL_OP _secret ; field ~reader_roles:_R_POOL_OP ~qualifier:RW ~ty:String - ~in_product_since:rel_rio "value" "the secret" + ~lifecycle:[(Published, rel_rio, "the secret")] + "value" "the secret" ; field ~qualifier:RW ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other_config" "other_config" - ~default_value:(Some (VMap [])) + ~lifecycle:[(Published, rel_rio, "other_config")] + "other_config" "other_config" ~default_value:(Some (VMap [])) ] () end @@ -7705,7 +9060,10 @@ module Network_sriov = struct ~in_oss_since:None ~contents: [ - uid _network_sriov + uid + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _network_sriov ; field ~qualifier:StaticRO ~ty:(Ref _pif) ~lifecycle "physical_PIF" "The PIF that has SR-IOV enabled" ~default_value:(Some (VRef "")) ; field ~qualifier:StaticRO ~ty:(Ref _pif) ~lifecycle "logical_PIF" @@ -8978,7 +10336,9 @@ module VUSB = struct ~ty:(Map (String, String)) ~lifecycle "other_config" "Additional configuration" ~default_value:(Some (VMap [])) - ; field ~qualifier:DynamicRO ~ty:Bool ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Bool + ~lifecycle: + [(Published, rel_rio, "is the device currently attached")] "currently_attached" "is the device currently attached" ~default_value:(Some (VBool false)) ] diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index 3ee1bcebd99..6ae642be5b3 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -742,13 +742,31 @@ let allowed_and_current_operations ?(writer_roles = None) ?(reader_roles = None) operations_type = [ field ~writer_roles ~reader_roles ~persist:false ~in_oss_since:None - ~in_product_since:rel_rio ~qualifier:DynamicRO ~ty:(Set operations_type) + ~lifecycle: + [ + ( Published + , rel_rio + , "list of the operations allowed in this state. This list is \ + advisory only and the server state may have changed by the time \ + this field is read by a client." + ) + ] + ~qualifier:DynamicRO ~ty:(Set operations_type) ~default_value:(Some (VSet [])) "allowed_operations" "list of the operations allowed in this state. This list is advisory \ only and the server state may have changed by the time this field is \ read by a client." ; field ~writer_roles ~reader_roles ~persist:false ~in_oss_since:None - ~in_product_since:rel_rio ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_rio + , "links each of the running tasks using this object (by reference) \ + to a current_operation enum which describes the nature of the \ + task." + ) + ] + ~qualifier:DynamicRO ~ty:(Map (String, operations_type)) ~default_value:(Some (VMap [])) "current_operations" "links each of the running tasks using this object (by reference) to a \ diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index d34daafc08a..266d695fa34 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -7,7 +7,9 @@ open Datamodel_types let host_memory = let field = field ~ty:Int in [ - field ~qualifier:DynamicRO "overhead" ~in_product_since:rel_rio + field ~qualifier:DynamicRO "overhead" + ~lifecycle: + [(Published, rel_rio, "Virtualization memory overhead (bytes).")] "Virtualization memory overhead (bytes)." 
~default_value:(Some (VInt 0L)) ~doc_tags:[Memory] ] @@ -15,14 +17,19 @@ let host_memory = let api_version = let field' = field ~qualifier:DynamicRO in [ - field' ~ty:Int ~in_product_since:rel_rio "major" "major version number" - ; field' ~ty:Int ~in_product_since:rel_rio "minor" "minor version number" - ; field' ~ty:String ~in_product_since:rel_rio "vendor" - "identification of vendor" + field' ~ty:Int + ~lifecycle:[(Published, rel_rio, "major version number")] + "major" "major version number" + ; field' ~ty:Int + ~lifecycle:[(Published, rel_rio, "minor version number")] + "minor" "minor version number" + ; field' ~ty:String + ~lifecycle:[(Published, rel_rio, "identification of vendor")] + "vendor" "identification of vendor" ; field' ~ty:(Map (String, String)) - ~in_product_since:rel_rio "vendor_implementation" - "details of vendor implementation" + ~lifecycle:[(Published, rel_rio, "details of vendor implementation")] + "vendor_implementation" "details of vendor implementation" ] let migrate_receive = @@ -2454,6 +2461,8 @@ let t = ~contents: ([ uid _host + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] ; namespace ~name:"name" ~contents:(names None RW ~lifecycle:[(Published, rel_rio, "")]) () @@ -2463,110 +2472,239 @@ let t = @ [ namespace ~name:"API_version" ~contents:api_version () ; field ~qualifier:DynamicRO ~ty:Bool "enabled" - ~in_product_since:rel_rio "True if the host is currently enabled" + ~lifecycle: + [(Published, rel_rio, "True if the host is currently enabled")] + "True if the host is currently enabled" ; field ~qualifier:StaticRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "software_version" "version strings" + ~lifecycle:[(Published, rel_rio, "version strings")] + "software_version" "version strings" ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "other_config" "additional configuration" + ~lifecycle:[(Published, rel_rio, "additional configuration")] + "other_config" "additional configuration" ~map_keys_roles: [("folder", _R_VM_OP); ("XenCenter.CustomFields.*", _R_VM_OP)] - ; field ~qualifier:StaticRO ~ty:(Set String) ~in_product_since:rel_rio + ; field ~qualifier:StaticRO ~ty:(Set String) + ~lifecycle:[(Published, rel_rio, "Xen capabilities")] "capabilities" "Xen capabilities" ; field ~qualifier:DynamicRO ~ty:(Map (String, String)) - ~in_product_since:rel_rio "cpu_configuration" + ~lifecycle: + [ + ( Published + , rel_rio + , "The CPU configuration on this host. May contain keys such \ + as \"nr_nodes\", \"sockets_per_node\", \ + \"cores_per_socket\", or \"threads_per_core\"" + ) + ] + "cpu_configuration" "The CPU configuration on this host. 
May contain keys such as \ \"nr_nodes\", \"sockets_per_node\", \"cores_per_socket\", or \ \"threads_per_core\"" - ; field ~qualifier:DynamicRO ~ty:String ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_rio + , "Scheduler policy currently in force on this host" + ) + ] "sched_policy" "Scheduler policy currently in force on this host" - ; field ~qualifier:DynamicRO ~ty:(Set String) ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:(Set String) + ~lifecycle: + [ + ( Published + , rel_rio + , "a list of the bootloaders installed on the machine" + ) + ] "supported_bootloaders" "a list of the bootloaders installed on the machine" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) - ~in_product_since:rel_rio "resident_VMs" - "list of VMs currently resident on host" + ~lifecycle: + [(Published, rel_rio, "list of VMs currently resident on host")] + "resident_VMs" "list of VMs currently resident on host" ; field ~qualifier:RW ~ty:(Map (String, String)) - ~in_product_since:rel_rio "logging" "logging configuration" + ~lifecycle:[(Published, rel_rio, "logging configuration")] + "logging" "logging configuration" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) ~doc_tags:[Networking] - ~in_product_since:rel_rio "PIFs" "physical network interfaces" - ; field ~qualifier:RW ~ty:(Ref _sr) ~in_product_since:rel_rio + ~lifecycle:[(Published, rel_rio, "physical network interfaces")] + "PIFs" "physical network interfaces" + ; field ~qualifier:RW ~ty:(Ref _sr) + ~lifecycle: + [ + ( Published + , rel_rio + , "The SR in which VDIs for suspend images are created" + ) + ] "suspend_image_sr" "The SR in which VDIs for suspend images are created" - ; field ~qualifier:RW ~ty:(Ref _sr) ~in_product_since:rel_rio + ; field ~qualifier:RW ~ty:(Ref _sr) + ~lifecycle: + [ + ( Published + , rel_rio + , "The SR in which VDIs for crash dumps are created" + ) + ] "crash_dump_sr" "The SR in which VDIs for crash dumps are created" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Set of host crash dumps")] ~qualifier:DynamicRO ~ty:(Set (Ref _host_crashdump)) "crashdumps" "Set of host crash dumps" - ; field ~in_oss_since:None ~in_product_since:rel_rio - ~internal_deprecated_since:rel_ely ~qualifier:DynamicRO - ~ty:(Set (Ref _host_patch)) "patches" "Set of host patches" - ; field ~in_oss_since:None ~in_product_since:rel_ely + ; field ~in_oss_since:None + ~lifecycle: + [ + (Published, rel_rio, "Set of host patches") + ; (Deprecated, rel_ely, "") + ] + ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "patches" + "Set of host patches" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_ely, "Set of updates")] ~qualifier:DynamicRO ~ty:(Set (Ref _pool_update)) "updates" "Set of updates" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) - ~in_product_since:rel_rio "PBDs" "physical blockdevices" + ~lifecycle:[(Published, rel_rio, "physical blockdevices")] + "PBDs" "physical blockdevices" ; field ~qualifier:DynamicRO ~ty:(Set (Ref _hostcpu)) - ~in_product_since:rel_rio "host_CPUs" - "The physical CPUs on this host" - ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride + ~lifecycle:[(Published, rel_rio, "The physical CPUs on this host")] + "host_CPUs" "The physical CPUs on this host" + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Details about the physical CPUs on this host" + ) + ] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) 
"cpu_info" "Details about the physical CPUs on this host" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:String ~doc_tags:[Networking] "hostname" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "The hostname of this host")] + ~qualifier:RW ~ty:String ~doc_tags:[Networking] "hostname" "The hostname of this host" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:String ~doc_tags:[Networking] "address" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "The address by which this host can be contacted from any \ + other host in the pool" + ) + ] + ~qualifier:RW ~ty:String ~doc_tags:[Networking] "address" "The address by which this host can be contacted from any other \ host in the pool" ; field ~qualifier:DynamicRO ~ty:(Ref _host_metrics) - ~in_product_since:rel_rio "metrics" - "metrics associated with this host" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ~lifecycle: + [(Published, rel_rio, "metrics associated with this host")] + "metrics" "metrics associated with this host" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "State of the current license")] ~qualifier:DynamicRO ~ty:(Map (String, String)) "license_params" "State of the current license" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~internal_only:true - ~qualifier:DynamicRO ~ty:Int "boot_free_mem" + ; field ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Free memory on host at boot time")] + ~internal_only:true ~qualifier:DynamicRO ~ty:Int "boot_free_mem" "Free memory on host at boot time" ; field ~in_oss_since:None ~qualifier:DynamicRO - ~in_product_since:rel_orlando ~ty:(Set String) - ~default_value:(Some (VSet [])) "ha_statefiles" + ~lifecycle: + [ + ( Published + , rel_orlando + , "The set of statefiles accessible from this host" + ) + ] + ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" "The set of statefiles accessible from this host" ; field ~in_oss_since:None ~qualifier:DynamicRO - ~in_product_since:rel_orlando ~ty:(Set String) - ~default_value:(Some (VSet [])) "ha_network_peers" + ~lifecycle: + [ + ( Published + , rel_orlando + , "The set of hosts visible via the network from this host" + ) + ] + ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_network_peers" "The set of hosts visible via the network from this host" - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_orlando + , "Binary blobs associated with this host" + ) + ] ~ty:(Map (String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this host" ; field ~writer_roles:_R_VM_OP ~qualifier:RW - ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) - ~ty:(Set String) "tags" + ~lifecycle: + [ + ( Published + , rel_orlando + , "user-specified tags for categorization purposes" + ) + ] + ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" - ; field ~qualifier:DynamicRO ~in_product_since:rel_george + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_george + , "type of external authentication service configured; empty \ + if none configured." + ) + ] ~default_value:(Some (VString "")) ~ty:String "external_auth_type" "type of external authentication service configured; empty if none \ configured." 
- ; field ~qualifier:DynamicRO ~in_product_since:rel_george + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_george + , "name of external authentication service configured; empty \ + if none configured." + ) + ] ~default_value:(Some (VString "")) ~ty:String "external_auth_service_name" "name of external authentication service configured; empty if none \ configured." - ; field ~qualifier:DynamicRO ~in_product_since:rel_george + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_george + , "configuration specific to external authentication service" + ) + ] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "external_auth_configuration" "configuration specific to external authentication service" - ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride + ; field ~qualifier:DynamicRO + ~lifecycle:[(Published, rel_midnight_ride, "Product edition")] ~default_value:(Some (VString "")) ~ty:String "edition" "Product edition" - ; field ~qualifier:RW ~in_product_since:rel_midnight_ride + ; field ~qualifier:RW + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Contact information of the license server" + ) + ] ~default_value: (Some (VMap @@ -2578,18 +2716,23 @@ let t = ) ~ty:(Map (String, String)) "license_server" "Contact information of the license server" - ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride + ; field ~qualifier:DynamicRO + ~lifecycle:[(Published, rel_midnight_ride, "BIOS strings")] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "bios_strings" "BIOS strings" - ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride + ; field ~qualifier:DynamicRO + ~lifecycle:[(Published, rel_midnight_ride, "The power on mode")] ~default_value:(Some (VString "")) ~ty:String "power_on_mode" "The power on mode" - ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride + ; field ~qualifier:DynamicRO + ~lifecycle:[(Published, rel_midnight_ride, "The power on config")] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "power_on_config" "The power on config" - ; field ~qualifier:StaticRO ~in_product_since:rel_cowley + ; field ~qualifier:StaticRO + ~lifecycle: + [(Published, rel_cowley, "The SR that is used as a local cache")] ~default_value:(Some (VRef null_ref)) ~ty:(Ref _sr) "local_cache_sr" "The SR that is used as a local cache" ; field ~qualifier:DynamicRO @@ -2619,22 +2762,45 @@ let t = restarts its SSL/TLS listening service; typically this takes less \ than a second but existing connections to it will be broken. API \ login sessions will remain valid." 
- ; field ~qualifier:RW ~in_product_since:rel_tampa + ; field ~qualifier:RW + ~lifecycle: + [ + ( Published + , rel_tampa + , "VCPUs params to apply to all resident guests" + ) + ] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "guest_VCPUs_params" "VCPUs params to apply to all resident guests" - ; field ~qualifier:RW ~in_product_since:rel_cream + ; field ~qualifier:RW + ~lifecycle: + [ + ( Published + , rel_cream + , "indicates whether the host is configured to output its \ + console to a physical display device" + ) + ] ~default_value:(Some (VEnum "enabled")) ~ty:display "display" "indicates whether the host is configured to output its console to \ a physical display device" - ; field ~qualifier:DynamicRO ~in_product_since:rel_cream + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_cream + , "The set of versions of the virtual hardware platform that \ + the host can offer to its guests" + ) + ] ~default_value:(Some (VSet [VInt 0L])) ~ty:(Set Int) "virtual_hardware_platform_versions" "The set of versions of the virtual hardware platform that the \ host can offer to its guests" ; field ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) - ~in_product_since:rel_ely ~ty:(Ref _vm) "control_domain" - "The control domain (domain 0)" + ~lifecycle:[(Published, rel_ely, "The control domain (domain 0)")] + ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)" ; field ~qualifier:DynamicRO ~lifecycle:[(Published, rel_ely, "")] ~ty:(Set (Ref _pool_update)) ~ignore_foreign_key:true @@ -2667,13 +2833,30 @@ let t = ~lifecycle:[(Published, rel_stockholm, "")] ~default_value:(Some (VSet [])) ~ty:(Set String) "editions" "List of all available product editions" - ; field ~qualifier:DynamicRO ~in_product_since:"1.303.0" + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , "1.303.0" + , "The set of pending mandatory guidances after applying \ + updates, which must be applied, as otherwise there may be \ + e.g. VM failures" + ) + ] ~ty:(Set update_guidances) "pending_guidances" ~default_value:(Some (VSet [])) "The set of pending mandatory guidances after applying updates, \ which must be applied, as otherwise there may be e.g. 
VM failures" - ; field ~qualifier:DynamicRO ~in_product_since:"1.313.0" ~ty:Bool - "tls_verification_enabled" ~default_value:(Some (VBool false)) + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , "1.313.0" + , "True if this host has TLS verifcation enabled" + ) + ] + ~ty:Bool "tls_verification_enabled" + ~default_value:(Some (VBool false)) "True if this host has TLS verifcation enabled" ; field ~qualifier:DynamicRO ~lifecycle:[] ~ty:DateTime "last_software_update" ~default_value:(Some (VDateTime Date.epoch)) diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml index f9b0de3d65c..f94a531903c 100644 --- a/ocaml/idl/datamodel_pool.ml +++ b/ocaml/idl/datamodel_pool.ml @@ -1603,24 +1603,48 @@ let t = ; get_guest_secureboot_readiness ] ~contents: - ([uid ~in_oss_since:None _pool] + ([ + uid ~in_oss_since:None + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + _pool + ] @ [ - field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:String "name_label" "Short name" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:String "name_description" "Description" - ; field ~in_oss_since:None ~in_product_since:rel_rio + field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Short name")] + ~qualifier:RW ~ty:String "name_label" "Short name" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Description")] + ~qualifier:RW ~ty:String "name_description" "Description" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "The host that is pool master")] ~qualifier:DynamicRO ~ty:(Ref _host) "master" "The host that is pool master" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:(Ref _sr) "default_SR" "Default SR for VDIs" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:(Ref _sr) "suspend_image_SR" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "Default SR for VDIs")] + ~qualifier:RW ~ty:(Ref _sr) "default_SR" "Default SR for VDIs" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "The SR in which VDIs for suspend images are created" + ) + ] + ~qualifier:RW ~ty:(Ref _sr) "suspend_image_SR" "The SR in which VDIs for suspend images are created" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:RW - ~ty:(Ref _sr) "crash_dump_SR" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "The SR in which VDIs for crash dumps are created" + ) + ] + ~qualifier:RW ~ty:(Ref _sr) "crash_dump_SR" "The SR in which VDIs for crash dumps are created" - ; field ~in_oss_since:None ~in_product_since:rel_rio + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "additional configuration")] ~ty:(Map (String, String)) "other_config" "additional configuration" ~map_keys_roles: @@ -1629,68 +1653,165 @@ let t = ; ("XenCenter.CustomFields.*", _R_VM_OP) ; ("EMPTY_FOLDERS", _R_VM_OP) ] - ; field ~in_oss_since:None ~in_product_since:rel_orlando + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_orlando + , "true if HA is enabled on the pool, false otherwise" + ) + ] ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "ha_enabled" "true if HA is enabled on the pool, false otherwise" - ; field ~in_oss_since:None ~in_product_since:rel_orlando + ; field ~in_oss_since:None + ~lifecycle: + [(Published, rel_orlando, "The current HA configuration")] ~qualifier:DynamicRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "ha_configuration" 
"The current HA configuration" - ; field ~in_oss_since:None ~in_product_since:rel_orlando + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_orlando, "HA statefile VDIs in use")] ~qualifier:DynamicRO ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" "HA statefile VDIs in use" - ; field ~in_oss_since:None ~in_product_since:rel_orlando + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_orlando + , "Number of host failures to tolerate before the Pool is \ + declared to be overcommitted" + ) + ] ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "ha_host_failures_to_tolerate" "Number of host failures to tolerate before the Pool is declared \ to be overcommitted" - ; field ~in_oss_since:None ~in_product_since:rel_orlando + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_orlando + , "Number of future host failures we have managed to find a \ + plan for. Once this reaches zero any future host failures \ + will cause the failure of protected VMs." + ) + ] ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "ha_plan_exists_for" "Number of future host failures we have managed to find a plan \ for. Once this reaches zero any future host failures will cause \ the failure of protected VMs." - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:RW - ~ty:Bool ~default_value:(Some (VBool false)) "ha_allow_overcommit" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_orlando + , "If set to false then operations which would cause the Pool \ + to become overcommitted will be blocked." + ) + ] + ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) + "ha_allow_overcommit" "If set to false then operations which would cause the Pool to \ become overcommitted will be blocked." - ; field ~in_oss_since:None ~in_product_since:rel_orlando + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_orlando + , "True if the Pool is considered to be overcommitted i.e. if \ + there exist insufficient physical resources to tolerate the \ + configured number of host failures" + ) + ] ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "ha_overcommitted" "True if the Pool is considered to be overcommitted i.e. 
if there \ exist insufficient physical resources to tolerate the configured \ number of host failures" - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_orlando + , "Binary blobs associated with this pool" + ) + ] ~ty:(Map (String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this pool" - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando + ; field ~writer_roles:_R_VM_OP + ~lifecycle: + [ + ( Published + , rel_orlando + , "user-specified tags for categorization purposes" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando + ; field ~writer_roles:_R_VM_OP + ~lifecycle: + [(Published, rel_orlando, "gui-specific configuration for pool")] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "gui_config" "gui-specific configuration for pool" - ; field ~writer_roles:_R_POOL_OP ~in_product_since:rel_dundee + ; field ~writer_roles:_R_POOL_OP + ~lifecycle: + [ + ( Published + , rel_dundee + , "Configuration for the automatic health check feature" + ) + ] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "health_check_config" "Configuration for the automatic health check feature" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~ty:String - ~default_value:(Some (VString "")) "wlb_url" - "Url for the configured workload balancing host" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~ty:String - ~default_value:(Some (VString "")) "wlb_username" - "Username for accessing the workload balancing host" - ; field ~in_product_since:rel_george ~internal_only:true - ~qualifier:DynamicRO ~ty:(Ref _secret) "wlb_password" - "Password for accessing the workload balancing host" + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "Url for the configured workload balancing host" + ) + ] + ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) + "wlb_url" "Url for the configured workload balancing host" + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "Username for accessing the workload balancing host" + ) + ] + ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) + "wlb_username" "Username for accessing the workload balancing host" + ; field + ~lifecycle: + [ + ( Published + , rel_george + , "Password for accessing the workload balancing host" + ) + ] + ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _secret) + "wlb_password" "Password for accessing the workload balancing host" ; field ~writer_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) - ~in_product_since:rel_george ~qualifier:RW ~ty:Bool - ~default_value:(Some (VBool false)) "wlb_enabled" + ~lifecycle: + [ + ( Published + , rel_george + , "true if workload balancing is enabled on the pool, false \ + otherwise" + ) + ] + ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) + "wlb_enabled" "true if workload balancing is enabled on the pool, false otherwise" ; field ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "wlb_verify_cert" @@ -1705,12 +1826,28 @@ let t = Pool.enable_tls_verification instead" ) ] - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "true a redo-log is to be used other than when HA is \ + enabled, false otherwise" + ) + ] ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) 
"redo_log_enabled" "true a redo-log is to be used other than when HA is enabled, \ false otherwise" - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "indicates the VDI to use for the redo-log other than when \ + HA is enabled" + ) + ] ~qualifier:DynamicRO ~ty:(Ref _vdi) ~default_value:(Some (VRef null_ref)) "redo_log_vdi" "indicates the VDI to use for the redo-log other than when HA is \ @@ -1730,15 +1867,37 @@ let t = SDN_controller instead." ) ] - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Pool-wide restrictions currently in effect" + ) + ] ~qualifier:DynamicRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "restrictions" "Pool-wide restrictions currently in effect" - ; field ~in_oss_since:None ~in_product_since:rel_boston + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_boston + , "The set of currently known metadata VDIs for this pool" + ) + ] ~qualifier:DynamicRO ~ty:(Set (Ref _vdi)) "metadata_VDIs" "The set of currently known metadata VDIs for this pool" - ; field ~in_oss_since:None ~in_product_since:rel_dundee + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_dundee + , "The HA cluster stack that is currently in use. Only valid \ + when HA is enabled." + ) + ] ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "ha_cluster_stack" "The HA cluster stack that is currently in use. Only valid when HA \ @@ -1746,12 +1905,26 @@ let t = ] @ allowed_and_current_operations operations @ [ - field ~in_oss_since:None ~in_product_since:rel_dundee + field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_dundee + , "Pool-wide guest agent configuration information" + ) + ] ~qualifier:DynamicRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "guest_agent_config" "Pool-wide guest agent configuration information" - ; field ~qualifier:DynamicRO ~in_product_since:rel_dundee + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , rel_dundee + , "Details about the physical CPUs on the pool" + ) + ] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "cpu_info" "Details about the physical CPUs on the pool" @@ -1765,13 +1938,30 @@ let t = "This field was consulted when VM.create did not specify a value \ for 'has_vendor_device'; VM.create now uses a simple default and \ no longer consults this value." - ; field ~qualifier:RW ~in_product_since:rel_ely + ; field ~qualifier:RW + ~lifecycle: + [ + ( Published + , rel_ely + , "The pool-wide flag to show if the live patching feauture is \ + disabled or not." + ) + ] ~default_value:(Some (VBool false)) ~ty:Bool "live_patching_disabled" "The pool-wide flag to show if the live patching feauture is \ disabled or not." - ; field ~in_product_since:rel_inverness ~qualifier:DynamicRO ~ty:Bool - ~default_value:(Some (VBool false)) "igmp_snooping_enabled" + ; field + ~lifecycle: + [ + ( Published + , rel_inverness + , "true if IGMP snooping is enabled in the pool, false \ + otherwise." + ) + ] + ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) + "igmp_snooping_enabled" "true if IGMP snooping is enabled in the pool, false otherwise." 
; field ~qualifier:StaticRO ~ty:String ~lifecycle: @@ -1790,8 +1980,17 @@ let t = ; field ~qualifier:StaticRO ~ty:String ~lifecycle:[] ~default_value:(Some (VString "")) "custom_uefi_certificates" "Custom UEFI certificates allowing Secure Boot" - ; field ~in_product_since:rel_stockholm_psr ~qualifier:RW ~ty:Bool - ~default_value:(Some (VBool false)) "is_psr_pending" + ; field + ~lifecycle: + [ + ( Published + , rel_stockholm_psr + , "True if either a PSR is running or we are waiting for a PSR \ + to be re-run" + ) + ] + ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) + "is_psr_pending" "True if either a PSR is running or we are waiting for a PSR to be \ re-run" ; field ~qualifier:DynamicRO @@ -1799,8 +1998,16 @@ let t = ~ty:Bool ~default_value:(Some (VBool false)) "tls_verification_enabled" "True iff TLS certificate verification is enabled" - ; field ~in_product_since:"1.301.0" ~qualifier:DynamicRO - ~ty:(Set (Ref _repository)) ~ignore_foreign_key:true "repositories" + ; field + ~lifecycle: + [ + ( Published + , "1.301.0" + , "The set of currently enabled repositories" + ) + ] + ~qualifier:DynamicRO ~ty:(Set (Ref _repository)) + ~ignore_foreign_key:true "repositories" ~default_value:(Some (VSet [])) "The set of currently enabled repositories" ; field ~qualifier:DynamicRO @@ -1814,11 +2021,29 @@ let t = "client_certificate_auth_name" "The name (CN/SAN) that an incoming client certificate must have \ to allow authentication" - ; field ~in_product_since:"21.3.0" ~qualifier:DynamicRO ~ty:String - ~default_value:(Some (VString "")) "repository_proxy_url" + ; field + ~lifecycle: + [ + ( Published + , "21.3.0" + , "Url of the proxy used in syncing with the enabled \ + repositories" + ) + ] + ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) + "repository_proxy_url" "Url of the proxy used in syncing with the enabled repositories" - ; field ~in_product_since:"21.3.0" ~qualifier:DynamicRO ~ty:String - ~default_value:(Some (VString "")) "repository_proxy_username" + ; field + ~lifecycle: + [ + ( Published + , "21.3.0" + , "Username for the authentication of the proxy used in \ + syncing with the enabled repositories" + ) + ] + ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) + "repository_proxy_username" "Username for the authentication of the proxy used in syncing with \ the enabled repositories" ; field ~qualifier:DynamicRO @@ -1836,7 +2061,15 @@ let t = "Default behaviour during migration, True if stream compression \ should be used" ; field ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool true)) - ~in_product_since:rel_rio "coordinator_bias" + ~lifecycle: + [ + ( Published + , rel_rio + , "true if bias against pool master when scheduling vms is \ + enabled, false otherwise" + ) + ] + "coordinator_bias" "true if bias against pool master when scheduling vms is enabled, \ false otherwise" ; field ~qualifier:StaticRO ~ty:Int ~default_value:(Some (VInt 8L)) diff --git a/ocaml/idl/datamodel_vm.ml b/ocaml/idl/datamodel_vm.ml index 4815a29c1ce..44ca1466d78 100644 --- a/ocaml/idl/datamodel_vm.ml +++ b/ocaml/idl/datamodel_vm.ml @@ -21,14 +21,25 @@ let vmpp_deprecated = let pv = [ - field ~in_product_since:rel_rio "bootloader" "name of or path to bootloader" - ; field ~in_product_since:rel_rio "kernel" "path to the kernel" - ; field ~in_product_since:rel_rio "ramdisk" "path to the initrd" - ; field ~in_product_since:rel_rio "args" "kernel command-line arguments" - ; field ~in_product_since:rel_rio "bootloader_args" - "miscellaneous arguments for the 
bootloader" - ; field ~in_oss_since:None ~in_product_since:rel_rio "legacy_args" - "to make Zurich guests boot" + field + ~lifecycle:[(Published, rel_rio, "name of or path to bootloader")] + "bootloader" "name of or path to bootloader" + ; field + ~lifecycle:[(Published, rel_rio, "path to the kernel")] + "kernel" "path to the kernel" + ; field + ~lifecycle:[(Published, rel_rio, "path to the initrd")] + "ramdisk" "path to the initrd" + ; field + ~lifecycle:[(Published, rel_rio, "kernel command-line arguments")] + "args" "kernel command-line arguments" + ; field + ~lifecycle: + [(Published, rel_rio, "miscellaneous arguments for the bootloader")] + "bootloader_args" "miscellaneous arguments for the bootloader" + ; field ~in_oss_since:None + ~lifecycle:[(Published, rel_rio, "to make Zurich guests boot")] + "legacy_args" "to make Zurich guests boot" ] (** HVM domain booting *) @@ -41,11 +52,20 @@ let hvm = ; (Deprecated, rel_kolkata, "Replaced by VM.domain_type") ] "boot_policy" "HVM boot policy" - ; field ~in_product_since:rel_rio + ; field + ~lifecycle:[(Published, rel_rio, "HVM boot params")] ~ty:(Map (String, String)) "boot_params" "HVM boot params" ; field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None ~ty:Float - ~in_product_since:rel_miami ~qualifier:StaticRO "shadow_multiplier" + ~lifecycle: + [ + ( Published + , rel_miami + , "multiplier applied to the amount of shadow that will be made \ + available to the guest" + ) + ] + ~qualifier:StaticRO "shadow_multiplier" "multiplier applied to the amount of shadow that will be made available \ to the guest" ~default_value:(Some (VFloat 1.)) @@ -54,29 +74,60 @@ let hvm = let guest_memory = let field = field ~ty:Int in [ - field "overhead" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN - ~qualifier:DynamicRO "Virtualization memory overhead (bytes)." - ~default_value:(Some (VInt 0L)) ~doc_tags:[Memory] - ; field "target" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN - ~qualifier:StaticRO + field "overhead" + ~lifecycle: + [(Published, rel_rio, "Virtualization memory overhead (bytes).")] + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO + "Virtualization memory overhead (bytes)." ~default_value:(Some (VInt 0L)) + ~doc_tags:[Memory] + ; field "target" + ~lifecycle: + [ + ( Published + , rel_rio + , "Dynamically-set memory target (bytes). The value of this field \ + indicates the current target for memory available to this VM." + ) + ; (Deprecated, rel_midnight_ride, "") + ] + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamically-set memory target (bytes). The value of this field \ indicates the current target for memory available to this VM." - ~default_value:(Some (VInt 0L)) - ~internal_deprecated_since:rel_midnight_ride ~doc_tags:[Memory] - ; field "static_max" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN - ~qualifier:StaticRO + ~default_value:(Some (VInt 0L)) ~doc_tags:[Memory] + ; field "static_max" + ~lifecycle: + [ + ( Published + , rel_rio + , "Statically-set (i.e. absolute) maximum (bytes). The value of this \ + field at VM start time acts as a hard limit of the amount of \ + memory a guest can use. New values only take effect on reboot." + ) + ] + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Statically-set (i.e. absolute) maximum (bytes). The value of this field \ at VM start time acts as a hard limit of the amount of memory a guest \ can use. New values only take effect on reboot." 
~doc_tags:[Memory] - ; field "dynamic_max" ~in_product_since:rel_rio + ; field "dynamic_max" + ~lifecycle:[(Published, rel_rio, "Dynamic maximum (bytes)")] ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic maximum (bytes)" ~doc_tags:[Memory] - ; field "dynamic_min" ~in_product_since:rel_rio + ; field "dynamic_min" + ~lifecycle:[(Published, rel_rio, "Dynamic minimum (bytes)")] ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic minimum (bytes)" ~doc_tags:[Memory] - ; field "static_min" ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN - ~qualifier:StaticRO + ; field "static_min" + ~lifecycle: + [ + ( Published + , rel_rio + , "Statically-set (i.e. absolute) mininum (bytes). The value of this \ + field indicates the least amount of memory this VM can boot with \ + without crashing." + ) + ] + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Statically-set (i.e. absolute) mininum (bytes). The value of this field \ indicates the least amount of memory this VM can boot with without \ crashing." @@ -119,21 +170,36 @@ let on_normal_exit_behaviour = (** Virtual CPUs *) let vcpus = [ - field ~in_product_since:rel_rio + field + ~lifecycle: + [ + ( Published + , rel_rio + , "configuration parameters for the selected VCPU policy" + ) + ] ~ty:(Map (String, String)) "params" "configuration parameters for the selected VCPU policy" - ; field ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:Int "max" - "Max number of VCPUs" - ; field ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:Int "at_startup" - "Boot number of VCPUs" + ; field + ~lifecycle:[(Published, rel_rio, "Max number of VCPUs")] + ~qualifier:StaticRO ~ty:Int "max" "Max number of VCPUs" + ; field + ~lifecycle:[(Published, rel_rio, "Boot number of VCPUs")] + ~qualifier:StaticRO ~ty:Int "at_startup" "Boot number of VCPUs" ] (** Default actions *) let actions = - let crash = - field ~in_product_since:rel_rio ~qualifier:StaticRO ~ty:on_crash_behaviour + let crash name descr = + field ~qualifier:StaticRO ~ty:on_crash_behaviour + ~lifecycle:[(Published, rel_rio, descr)] + name descr + in + let normal name descr = + field ~ty:on_normal_exit_behaviour + ~lifecycle:[(Published, rel_rio, descr)] + name descr in - let normal = field ~in_product_since:rel_rio ~ty:on_normal_exit_behaviour in let soft = field ~qualifier:RW ~lifecycle:[] ~ty:on_softreboot_behavior ~default_value:(Some (VEnum "soft_reboot")) @@ -2493,7 +2559,11 @@ let t = ; remove_from_blocked_operations ] ~contents: - ([uid _vm] + ([ + uid _vm + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ] @ allowed_and_current_operations operations @ [ namespace ~name:"name" @@ -2511,9 +2581,25 @@ let t = ) ] ~ty:power_state "power_state" "Current power state of the machine" - ; field ~ty:Int "user_version" ~in_product_since:rel_rio + ; field ~ty:Int "user_version" + ~lifecycle: + [ + ( Published + , rel_rio + , "Creators of VMs and templates may store version information \ + here." + ) + ] "Creators of VMs and templates may store version information here." - ; field ~effect:true ~ty:Bool "is_a_template" ~in_product_since:rel_rio + ; field ~effect:true ~ty:Bool "is_a_template" + ~lifecycle: + [ + ( Published + , rel_rio + , "true if this is a template. Template VMs can never be \ + started, they are used only for cloning other VMs" + ) + ] "true if this is a template. 
Template VMs can never be started, \ they are used only for cloning other VMs" ; field ~ty:Bool ~default_value:(Some (VBool false)) @@ -2535,44 +2621,74 @@ let t = ~ty:(Ref _vdi) "suspend_VDI" "The VDI that a suspend image is stored on. (Only has meaning if \ VM is currently suspended)" - ; field ~in_product_since:rel_rio ~writer_roles:_R_VM_POWER_ADMIN - ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" - "the host the VM is currently resident on" + ; field + ~lifecycle: + [(Published, rel_rio, "the host the VM is currently resident on")] + ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~ty:(Ref _host) + "resident_on" "the host the VM is currently resident on" ; field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None - ~in_product_since:rel_rio ~qualifier:DynamicRO - ~default_value:(Some (VRef null_ref)) ~ty:(Ref _host) - "scheduled_to_be_resident_on" + ~lifecycle: + [ + ( Published + , rel_rio + , "the host on which the VM is due to be \ + started/resumed/migrated. This acts as a memory reservation \ + indicator" + ) + ] + ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) + ~ty:(Ref _host) "scheduled_to_be_resident_on" "the host on which the VM is due to be started/resumed/migrated. \ This acts as a memory reservation indicator" ; field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None - ~in_product_since:rel_rio ~ty:(Ref _host) "affinity" + ~lifecycle: + [ + ( Published + , rel_rio + , "A host which the VM has some affinity for (or NULL). This \ + is used as a hint to the start call when it decides where \ + to run the VM. Resource constraints may cause the VM to be \ + started elsewhere." + ) + ] + ~ty:(Ref _host) "affinity" "A host which the VM has some affinity for (or NULL). This is used \ as a hint to the start call when it decides where to run the VM. \ Resource constraints may cause the VM to be started elsewhere." 
; namespace ~name:"memory" ~contents:guest_memory () ; namespace ~name:"VCPUs" ~contents:vcpus () ; namespace ~name:"actions" ~contents:actions () - ; field ~in_product_since:rel_rio ~writer_roles:_R_POOL_ADMIN - ~qualifier:DynamicRO ~ty:(Set (Ref _console)) "consoles" - "virtual console devices" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO - ~ty:(Set (Ref _vif)) ~doc_tags:[Networking] "VIFs" - "virtual network interfaces" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO - ~ty:(Set (Ref _vbd)) "VBDs" "virtual block devices" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO - ~ty:(Set (Ref _vusb)) "VUSBs" "virtual usb devices" - ; field ~in_product_since:rel_rio ~writer_roles:_R_POOL_ADMIN - ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" + ; field + ~lifecycle:[(Published, rel_rio, "virtual console devices")] + ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO + ~ty:(Set (Ref _console)) "consoles" "virtual console devices" + ; field + ~lifecycle:[(Published, rel_rio, "virtual network interfaces")] + ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) ~doc_tags:[Networking] + "VIFs" "virtual network interfaces" + ; field + ~lifecycle:[(Published, rel_rio, "virtual block devices")] + ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" + "virtual block devices" + ; field + ~lifecycle:[(Published, rel_rio, "virtual usb devices")] + ~qualifier:DynamicRO ~ty:(Set (Ref _vusb)) "VUSBs" + "virtual usb devices" + ; field + ~lifecycle: + [(Published, rel_rio, "crash dumps associated with this VM")] + ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO + ~ty:(Set (Ref _crashdump)) "crash_dumps" "crash dumps associated with this VM" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO - ~ty:(Set (Ref _vtpm)) "VTPMs" "virtual TPMs" + ; field + ~lifecycle:[(Published, rel_rio, "virtual TPMs")] + ~qualifier:DynamicRO ~ty:(Set (Ref _vtpm)) "VTPMs" "virtual TPMs" ; namespace ~name:"PV" ~contents:pv () ; namespace ~name:"HVM" ~contents:hvm () ; field ~ty:(Map (String, String)) - ~in_product_since:rel_rio "platform" - "platform-specific configuration" + ~lifecycle:[(Published, rel_rio, "platform-specific configuration")] + "platform" "platform-specific configuration" ; field ~lifecycle: [ @@ -2580,7 +2696,8 @@ let t = ; (Deprecated, rel_boston, "Field was never used") ] "PCI_bus" "PCI bus path for pass-through devices" - ; field ~in_product_since:rel_rio + ; field + ~lifecycle:[(Published, rel_rio, "additional configuration")] ~ty:(Map (String, String)) "other_config" "additional configuration" ~map_keys_roles: @@ -2589,22 +2706,53 @@ let t = ; ("folder", _R_VM_OP) ; ("XenCenter.CustomFields.*", _R_VM_OP) ] - ; field ~qualifier:DynamicRO ~ty:Int "domid" ~in_product_since:rel_rio + ; field ~qualifier:DynamicRO ~ty:Int "domid" + ~lifecycle: + [(Published, rel_rio, "domain ID (if available, -1 otherwise)")] "domain ID (if available, -1 otherwise)" ; field ~qualifier:DynamicRO ~in_oss_since:None - ~in_product_since:rel_rio ~ty:String "domarch" + ~lifecycle: + [ + ( Published + , rel_rio + , "Domain architecture (if available, null string otherwise)" + ) + ] + ~ty:String "domarch" "Domain architecture (if available, null string otherwise)" - ; field ~in_oss_since:None ~in_product_since:rel_rio ~qualifier:StaticRO + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "describes the CPU flags on which the VM was last booted" + ) + ] + ~qualifier:StaticRO ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) "last_boot_CPU_flags" "describes the CPU flags on 
which the VM was last booted" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO ~ty:Bool - "is_control_domain" + ; field + ~lifecycle: + [ + ( Published + , rel_rio + , "true if this is a control domain (domain 0 or a driver \ + domain)" + ) + ] + ~qualifier:DynamicRO ~ty:Bool "is_control_domain" "true if this is a control domain (domain 0 or a driver domain)" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO - ~ty:(Ref _vm_metrics) "metrics" "metrics associated with this VM" - ; field ~in_product_since:rel_rio ~qualifier:DynamicRO - ~ty:(Ref _vm_guest_metrics) "guest_metrics" + ; field + ~lifecycle:[(Published, rel_rio, "metrics associated with this VM")] + ~qualifier:DynamicRO ~ty:(Ref _vm_metrics) "metrics" + "metrics associated with this VM" + ; field + ~lifecycle: + [ + (Published, rel_rio, "metrics associated with the running guest") + ] + ~qualifier:DynamicRO ~ty:(Ref _vm_guest_metrics) "guest_metrics" "metrics associated with the running guest" ; (* This was an internal field in Rio, Miami beta1, Miami beta2 but is now exposed so that it will be included automatically in Miami GA exports and can be restored, important if @@ -2626,25 +2774,59 @@ let t = ~qualifier:StaticRO ~ty:String "last_booted_record" "marshalled value containing VM record at time of last boot" ~default_value:(Some (VString "")) - ; field ~in_oss_since:None ~in_product_since:rel_rio ~ty:String - "recommendations" + ; field ~in_oss_since:None + ~lifecycle: + [ + ( Published + , rel_rio + , "An XML specification of recommended values and ranges for \ + properties of this VM" + ) + ] + ~ty:String "recommendations" "An XML specification of recommended values and ranges for \ properties of this VM" ; field ~effect:true ~in_oss_since:None ~ty:(Map (String, String)) - ~in_product_since:rel_miami ~qualifier:RW "xenstore_data" + ~lifecycle: + [ + ( Published + , rel_miami + , "data to be inserted into the xenstore tree \ + (/local/domain//vm-data) after the VM is created." + ) + ] + ~qualifier:RW "xenstore_data" "data to be inserted into the xenstore tree \ (/local/domain//vm-data) after the VM is created." ~default_value:(Some (VMap [])) ; field ~writer_roles:_R_POOL_OP ~in_oss_since:None ~ty:Bool - ~in_product_since:rel_orlando ~internal_deprecated_since:rel_boston + ~lifecycle: + [ + ( Published + , rel_orlando + , "if true then the system will attempt to keep the VM running \ + as much as possible." + ) + ; (Deprecated, rel_boston, "") + ] ~qualifier:StaticRO "ha_always_run" "if true then the system will attempt to keep the VM running as \ much as possible." 
~default_value:(Some (VBool false)) ; field ~writer_roles:_R_POOL_OP ~in_oss_since:None ~ty:String - ~in_product_since:rel_orlando ~qualifier:StaticRO - "ha_restart_priority" + ~lifecycle: + [ + ( Published + , rel_orlando + , "has possible values: \"best-effort\" meaning \"try to \ + restart this VM if possible but don't consider the Pool to \ + be overcommitted if this is not possible\"; \"restart\" \ + meaning \"this VM should be restarted\"; \"\" meaning \"do \ + not try to restart this VM\"" + ) + ] + ~qualifier:StaticRO "ha_restart_priority" "has possible values: \"best-effort\" meaning \"try to restart \ this VM if possible but don't consider the Pool to be \ overcommitted if this is not possible\"; \"restart\" meaning \ @@ -2652,54 +2834,127 @@ let t = restart this VM\"" ~default_value:(Some (VString "")) ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) - ~ty:Bool "is_a_snapshot" + ~lifecycle: + [ + ( Published + , rel_orlando + , "true if this is a snapshot. Snapshotted VMs can never be \ + started, they are used only for cloning other VMs" + ) + ] + ~default_value:(Some (VBool false)) ~ty:Bool "is_a_snapshot" "true if this is a snapshot. Snapshotted VMs can never be started, \ they are used only for cloning other VMs" ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) - ~ty:(Ref _vm) "snapshot_of" + ~lifecycle: + [ + ( Published + , rel_orlando + , "Ref pointing to the VM this snapshot is of." + ) + ] + ~default_value:(Some (VRef "")) ~ty:(Ref _vm) "snapshot_of" "Ref pointing to the VM this snapshot is of." ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_orlando ~ty:(Set (Ref _vm)) "snapshots" + ~lifecycle: + [ + ( Published + , rel_orlando + , "List pointing to all the VM snapshots." + ) + ] + ~ty:(Set (Ref _vm)) "snapshots" "List pointing to all the VM snapshots." ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_orlando + ~lifecycle: + [ + ( Published + , rel_orlando + , "Date/time when this snapshot was created." + ) + ] ~default_value:(Some (VDateTime Date.epoch)) ~ty:DateTime "snapshot_time" "Date/time when this snapshot was created." 
; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_orlando ~default_value:(Some (VString "")) - ~ty:String "transportable_snapshot_id" - "Transportable ID of the snapshot VM" - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando + ~lifecycle: + [(Published, rel_orlando, "Transportable ID of the snapshot VM")] + ~default_value:(Some (VString "")) ~ty:String + "transportable_snapshot_id" "Transportable ID of the snapshot VM" + ; field ~qualifier:DynamicRO + ~lifecycle: + [(Published, rel_orlando, "Binary blobs associated with this VM")] ~ty:(Map (String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this VM" - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando + ; field ~writer_roles:_R_VM_OP + ~lifecycle: + [ + ( Published + , rel_orlando + , "user-specified tags for categorization purposes" + ) + ] ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) - ~qualifier:StaticRO + ; field + ~lifecycle: + [ + ( Published + , rel_orlando + , "List of operations which have been explicitly blocked and \ + an error code" + ) + ] + ~default_value:(Some (VMap [])) ~qualifier:StaticRO ~ty:(Map (operations, String)) "blocked_operations" "List of operations which have been explicitly blocked and an \ error code" ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Human-readable information concerning this snapshot" + ) + ] + ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "snapshot_info" "Human-readable information concerning this snapshot" ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_midnight_ride + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Encoded information about the VM's metadata this is a \ + snapshot of" + ) + ] ~default_value:(Some (VString "")) ~ty:String "snapshot_metadata" "Encoded information about the VM's metadata this is a snapshot of" ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_midnight_ride ~default_value:(Some (VRef "")) - ~ty:(Ref _vm) "parent" "Ref pointing to the parent of this VM" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "Ref pointing to the parent of this VM" + ) + ] + ~default_value:(Some (VRef "")) ~ty:(Ref _vm) "parent" + "Ref pointing to the parent of this VM" ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO - ~in_product_since:rel_midnight_ride ~ty:(Set (Ref _vm)) "children" + ~lifecycle: + [ + ( Published + , rel_midnight_ride + , "List pointing to all the children of this VM" + ) + ] + ~ty:(Set (Ref _vm)) "children" "List pointing to all the children of this VM" - ; field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride + ; field ~qualifier:DynamicRO + ~lifecycle:[(Published, rel_midnight_ride, "BIOS strings")] ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "bios_strings" "BIOS strings" @@ -2712,30 +2967,65 @@ let t = "is_snapshot_from_vmpp" "true if this snapshot was created by the protection policy" ; field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO - ~in_product_since:rel_falcon ~default_value:(Some (VRef null_ref)) - ~ty:(Ref _vmss) "snapshot_schedule" + ~lifecycle: + [ + ( Published + , rel_falcon + , "Ref pointing to a snapshot schedule for this VM" + ) + ] + 
~default_value:(Some (VRef null_ref)) ~ty:(Ref _vmss) + "snapshot_schedule" "Ref pointing to a snapshot schedule for this VM" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO - ~in_product_since:rel_falcon ~default_value:(Some (VBool false)) - ~ty:Bool "is_vmss_snapshot" + ~lifecycle: + [ + ( Published + , rel_falcon + , "true if this snapshot was created by the snapshot schedule" + ) + ] + ~default_value:(Some (VBool false)) ~ty:Bool "is_vmss_snapshot" "true if this snapshot was created by the snapshot schedule" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO - ~in_product_since:rel_rio ~ty:(Ref _vm_appliance) - ~default_value:(Some (VRef null_ref)) "appliance" - "the appliance to which this VM belongs" + ~lifecycle: + [(Published, rel_rio, "the appliance to which this VM belongs")] + ~ty:(Ref _vm_appliance) ~default_value:(Some (VRef null_ref)) + "appliance" "the appliance to which this VM belongs" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO - ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int - "start_delay" + ~lifecycle: + [ + ( Published + , rel_boston + , "The delay to wait before proceeding to the next order in \ + the startup sequence (seconds)" + ) + ] + ~default_value:(Some (VInt 0L)) ~ty:Int "start_delay" "The delay to wait before proceeding to the next order in the \ startup sequence (seconds)" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO - ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int - "shutdown_delay" + ~lifecycle: + [ + ( Published + , rel_boston + , "The delay to wait before proceeding to the next order in \ + the shutdown sequence (seconds)" + ) + ] + ~default_value:(Some (VInt 0L)) ~ty:Int "shutdown_delay" "The delay to wait before proceeding to the next order in the \ shutdown sequence (seconds)" ; field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO - ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int - "order" + ~lifecycle: + [ + ( Published + , rel_boston + , "The point in the startup or shutdown sequence at which this \ + VM will be started" + ) + ] + ~default_value:(Some (VInt 0L)) ~ty:Int "order" "The point in the startup or shutdown sequence at which this VM \ will be started" ; field ~qualifier:DynamicRO @@ -2746,18 +3036,38 @@ let t = ~ty:(Set (Ref _pci)) "attached_PCIs" "Currently passed-through PCI devices" ; field ~writer_roles:_R_VM_ADMIN ~qualifier:RW - ~in_product_since:rel_boston ~default_value:(Some (VRef null_ref)) - ~ty:(Ref _sr) "suspend_SR" + ~lifecycle: + [ + ( Published + , rel_boston + , "The SR on which a suspend image is stored" + ) + ] + ~default_value:(Some (VRef null_ref)) ~ty:(Ref _sr) "suspend_SR" "The SR on which a suspend image is stored" - ; field ~qualifier:StaticRO ~in_product_since:rel_boston + ; field ~qualifier:StaticRO + ~lifecycle: + [ + ( Published + , rel_boston + , "The number of times this VM has been recovered" + ) + ] ~default_value:(Some (VInt 0L)) ~ty:Int "version" "The number of times this VM has been recovered" - ; field ~qualifier:StaticRO ~in_product_since:rel_clearwater + ; field ~qualifier:StaticRO + ~lifecycle:[(Published, rel_clearwater, "Generation ID of the VM")] ~default_value:(Some (VString "0:0")) ~ty:String "generation_id" "Generation ID of the VM" ; field ~writer_roles:_R_VM_ADMIN ~qualifier:RW - ~in_product_since:rel_cream ~default_value:(Some (VInt 0L)) ~ty:Int - "hardware_platform_version" + ~lifecycle: + [ + ( Published + , rel_cream + , "The host virtual hardware platform version the VM can run on" + ) + ] + 
~default_value:(Some (VInt 0L)) ~ty:Int "hardware_platform_version" "The host virtual hardware platform version the VM can run on" ; field ~qualifier:StaticRO ~lifecycle: @@ -2778,7 +3088,18 @@ let t = ~default_value:(Some (VBool false)) "requires_reboot" "Indicates whether a VM requires a reboot in order to update its \ configuration, e.g. its memory allocation." - ; field ~qualifier:StaticRO ~ty:String ~in_product_since:rel_ely + ; field ~qualifier:StaticRO ~ty:String + ~lifecycle: + [ + ( Published + , rel_ely + , "Textual reference to the template used to create a VM. This \ + can be used by clients in need of an immutable reference to \ + the template since the latter's uuid and name_label may \ + change, for example, after a package installation or \ + upgrade." + ) + ] ~default_value:(Some (VString "")) "reference_label" "Textual reference to the template used to create a VM. This can \ be used by clients in need of an immutable reference to the \ @@ -2802,7 +3123,16 @@ let t = "NVRAM" ~default_value:(Some (VMap [])) "initial value for guest NVRAM (containing UEFI variables, etc). \ Cannot be changed while the VM is running" - ; field ~qualifier:DynamicRO ~in_product_since:"1.303.0" + ; field ~qualifier:DynamicRO + ~lifecycle: + [ + ( Published + , "1.303.0" + , "The set of pending mandatory guidances after applying \ + updates, which must be applied, as otherwise there may be \ + e.g. VM failures" + ) + ] ~ty:(Set update_guidances) "pending_guidances" ~default_value:(Some (VSet [])) "The set of pending mandatory guidances after applying updates, \ diff --git a/ocaml/idl/datamodel_vm_group.ml b/ocaml/idl/datamodel_vm_group.ml index efc86791bd8..75924f8c150 100644 --- a/ocaml/idl/datamodel_vm_group.ml +++ b/ocaml/idl/datamodel_vm_group.ml @@ -33,6 +33,8 @@ let t = ~contents: [ uid _vm_group + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] ; namespace ~name:"name" ~contents:(names ~lifecycle:[(Published, rel_rio, "")] None RW) () diff --git a/ocaml/idl/datamodel_vtpm.ml b/ocaml/idl/datamodel_vtpm.ml index 3c31614a9a1..692aae12637 100644 --- a/ocaml/idl/datamodel_vtpm.ml +++ b/ocaml/idl/datamodel_vtpm.ml @@ -73,14 +73,31 @@ let t = ~contents: (List.concat [ - [uid _vtpm] + [ + uid _vtpm + ~lifecycle: + [(Published, rel_rio, "Unique identifier/object reference")] + ] ; allowed_and_current_operations operations ; [ field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" - ~in_product_since:rel_rio + ~lifecycle: + [ + ( Published + , rel_rio + , "The virtual machine the TPM is attached to" + ) + ] "The virtual machine the TPM is attached to" ; field ~qualifier:DynamicRO ~ty:(Ref _vm) "backend" - ~in_product_since:rel_rio ~default_value:(Some (VRef null_ref)) + ~lifecycle: + [ + ( Published + , rel_rio + , "The domain where the backend is located (unused)" + ) + ] + ~default_value:(Some (VRef null_ref)) "The domain where the backend is located (unused)" ; field ~qualifier:DynamicRO ~ty:persistence_backend ~default_value:(Some (VEnum "xapi")) ~lifecycle:[] From 51ccf7036585a357caf4a62c70c14baa34e391b0 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 23 Sep 2024 15:55:34 +0100 Subject: [PATCH 040/141] idl: Remove in_product_since and internal_deprecated_since parameters for fields Signed-off-by: Andrii Sultanov --- ocaml/idl/datamodel_common.ml | 43 ++++------------------------------- 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index 6ae642be5b3..7c557282259 100644 --- 
a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -653,46 +653,16 @@ let operation_enum x = (x.msg_name, Printf.sprintf "refers to the operation \"%s\"" x.msg_name) (** Make an object field record *) -let field ?(in_oss_since = Some "3.0.3") ?in_product_since - ?(internal_only = false) ?internal_deprecated_since +let field ?(in_oss_since = Some "3.0.3") ?(internal_only = false) ?(ignore_foreign_key = false) ?(writer_roles = None) ?(reader_roles = None) ?(qualifier = RW) ?(ty = String) ?(effect = false) ?(default_value = None) ?(persist = true) ?(map_keys_roles = []) ?(* list of (key_name,(writer_roles)) for a map field *) lifecycle ?(doc_tags = []) name desc = - (* in_product_since currently defaults to 'Some rel_rio', for backwards compatibility. - * This should eventually become 'None'. *) - let _ = - match (lifecycle, in_product_since) with - | None, None -> - failwith ("Lifecycle for field '" ^ name ^ "' not specified") - | Some _, Some _ -> - failwith - ("lifecycle is given, in_product_since should not be specified \ - explicitly in " - ^ name - ) - | _, _ -> - () - in let lifecycle = match lifecycle with | None -> - let published = - match in_product_since with - | None -> - [] - | Some rel -> - [(Published, rel, desc)] - in - let deprecated = - match internal_deprecated_since with - | None -> - [] - | Some rel -> - [(Deprecated, rel, "")] - in - published @ deprecated + failwith ("Lifecycle for field '" ^ name ^ "' not specified") | Some l -> l in @@ -731,9 +701,7 @@ let field ?(in_oss_since = Some "3.0.3") ?in_product_since let uid ?(in_oss_since = Some "3.0.3") ?(reader_roles = None) ?lifecycle _refname = - let in_product_since = if lifecycle = None then Some rel_rio else None in - field ~in_oss_since ?in_product_since ?lifecycle ~qualifier:DynamicRO - ~ty:String + field ~in_oss_since ?lifecycle ~qualifier:DynamicRO ~ty:String ~writer_roles:_R_POOL_ADMIN (* only the system should be able to create/modify uuids *) ~reader_roles "uuid" "Unique identifier/object reference" @@ -794,10 +762,9 @@ let namespace ?(get_field_writer_roles = fun x -> x) (** Many of the objects have a set of names of various lengths: *) let names ?(writer_roles = None) ?(reader_roles = None) ?lifecycle in_oss_since qual = - let in_product_since = if lifecycle = None then Some rel_rio else None in let field x y = - field x y ~in_oss_since ?in_product_since ~qualifier:qual ~writer_roles - ~reader_roles ~default_value:(Some (VString "")) ?lifecycle + field x y ~in_oss_since ~qualifier:qual ~writer_roles ~reader_roles + ~default_value:(Some (VString "")) ?lifecycle in [ field "label" "a human-readable name" From 2f50ab1a81ec4bf4cd51a761de9408407667a97b Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 24 Sep 2024 09:00:11 +0100 Subject: [PATCH 041/141] Update docs on in_product_since, specify that lifecycle is now preferred Signed-off-by: Andrii Sultanov --- .../xapi/guides/howtos/add-function.md | 71 ++++++------------- 1 file changed, 22 insertions(+), 49 deletions(-) diff --git a/doc/content/xapi/guides/howtos/add-function.md b/doc/content/xapi/guides/howtos/add-function.md index 07ef3cebfd4..8aeedfb27fb 100644 --- a/doc/content/xapi/guides/howtos/add-function.md +++ b/doc/content/xapi/guides/howtos/add-function.md @@ -22,7 +22,7 @@ The function to describe the new message will look something like the following: let host_price_of = call ~flags:[`Session] ~name:"price_of" ~in_oss_since:None - ~in_product_since:rel_orlando + ~lifecycle:[] ~params:[(Ref _host, "host", "The host 
containing the price information"); (String, "item", "The item whose price is queried")] ~result:(Float, "The price of the item") @@ -41,15 +41,14 @@ host_price_of is added to the messages of the host class: ] ... -The parameters passed to call are all optional (except ~name and ~in_product_since). +The parameters passed to call are all optional (except ~name and ~lifecycle). - The ~flags parameter is used to set conditions for the use of the message. For example, `Session is used to indicate that the call must be made in the presence of an existing session. -- The value of the ~in_product_since parameter is a string taken from - `idl/datamodel_types.ml` indicates the XenServer release in which this - message was first introduced. +- The value of the `~lifecycle` parameter should be `[]` in new code, with dune + automatically generating appropriate values (`datamodel_lifecycle.ml`) - The ~params parameter describes a list of the formal parameters of the message. Each parameter is described by a triple. The first component of the triple is @@ -66,7 +65,7 @@ The parameters passed to call are all optional (except ~name and ~in_product_sin - The bool ~hide_from_docs parameter prevents the message from being included in the documentation when generated. -- The bool ~pool_internal parameter is used to indicate if the message should be callable by external systems or only internal hosts. +- The bool ~pool_internal parameter is used to indicate if the message should be callable by external systems or only internal hosts. - The ~errs parameter is a list of possible exceptions that the message can raise. @@ -76,53 +75,27 @@ The parameters passed to call are all optional (except ~name and ~in_product_sin Compiling `xen-api.(hg|git)` will cause the code corresponding to this message -to be generated and output in `ocaml/xapi/server.ml`. In the example above, a +to be generated and output in `ocaml/xapi/server.ml`. In the example above, a section handling an incoming call host.price_of appeared in `ocaml/xapi/server.ml`. -However, after this was generated, the rest of the build failed because this +However, after this was generated, the rest of the build failed because this call expects a price_of function in the Host object. -Expected values in parameter ~in_product_since ----------------------------------------------- - -In the example above, the value of the parameter ~in_product_since informs that -the message host_price_of was added during the rel_orlando release cycle. If a -new release cycle is required, then it needs to be added in the file -`idl/datamodel_types.ml`. The patch below shows how the new rel_george release -identifier was added. Any class, message, etc. added during the rel_george -release cycle should contain ~in_product_since:rel_george entries. 
-(obs: the release and upgrade infrastructure can handle only one new -`rel_*` identifier -- in this case, rel_george -- in each release) - - --- a/ocaml/idl/datamodel_types.ml Tue Nov 11 15:17:48 2008 +0000 - +++ b/ocaml/idl/datamodel_types.ml Tue Nov 11 15:53:29 2008 +0000 - @@ -27,14 +27,13 @@ - (* useful constants for product vsn tracking *) - let oss_since_303 = Some "3.0.3" - +let rel_george = "george" - let rel_orlando = "orlando" - let rel_orlando_update_1 = "orlando-update-1" - let rel_symc = "symc" - let rel_miami = "miami" - let rel_rio = "rio" - -let release_order = [engp:rel_rio; rel_miami; rel_symc; rel_orlando; rel_orlando_update_1] - +let release_order = [engp:rel_rio; rel_miami; rel_symc; rel_orlando; rel_orlando_update_1; rel_george] - Update expose_get_all_messages_for list --------------------------------------- -If you are adding a new class, do not forget to add your new class \_name to -the expose_get_all_messages_for list, at the bottom of datamodel.ml, in +If you are adding a new class, do not forget to add your new class \_name to +the expose_get_all_messages_for list, at the bottom of datamodel.ml, in order to have automatically generated get_all and get_all_records functions attached to it. Update the RBAC field containing the roles expected to use the new API call --------------------------------------------------------------------------- -After the RBAC integration, Xapi provides by default a set of static roles +After the RBAC integration, Xapi provides by default a set of static roles associated to the most common subject tasks. The api calls associated with each role are defined by a new `~allowed_roles` -parameter in each api call, which specifies the list of static roles that +parameter in each api call, which specifies the list of static roles that should be able to execute the call. The possible roles for this list is one of the following names, defined in `datamodel.ml`: @@ -137,16 +110,16 @@ So, for instance, ~allowed_roles:[role_pool_admin,role_pool_operator] (* this is not the recommended usage, see example below *) -would be a valid list (though it is not the recommended way of using +would be a valid list (though it is not the recommended way of using allowed_roles, see below), meaning that subjects belonging to either role_pool_admin or role_pool_operator can execute the api call. -The RBAC requirements define a policy where the roles in the list above are -supposed to be totally-ordered by the set of api-calls associated with each of -them. That means that any api-call allowed to role_pool_operator should also be -in role_pool_admin; any api-call allowed to role_vm_power_admin should also be -in role_pool_operator and also in role_pool_admin; and so on. Datamodel.ml -provides shortcuts for expressing these totally-ordered set of roles policy +The RBAC requirements define a policy where the roles in the list above are +supposed to be totally-ordered by the set of api-calls associated with each of +them. That means that any api-call allowed to role_pool_operator should also be +in role_pool_admin; any api-call allowed to role_vm_power_admin should also be +in role_pool_operator and also in role_pool_admin; and so on. 
Datamodel.ml +provides shortcuts for expressing these totally-ordered set of roles policy associated with each api-call: - \_R_POOL_ADMIN, equivalent to [role_pool_admin] @@ -158,11 +131,11 @@ associated with each api-call: The `~allowed_roles` parameter should use one of the shortcuts in the list above, instead of directly using a list of roles, because the shortcuts above make sure -that the roles in the list are in a total order regarding the api-calls +that the roles in the list are in a total order regarding the api-calls permission sets. Creating an api-call with e.g. allowed_roles:[role_pool_admin,role_vm_admin] would be wrong, because that -would mean that a pool_operator cannot execute the api-call that a vm_admin can, -breaking the total-order policy expected in the RBAC 1.0 implementation. +would mean that a pool_operator cannot execute the api-call that a vm_admin can, +breaking the total-order policy expected in the RBAC 1.0 implementation. In the future, this requirement might be relaxed. So, the example above should instead be used as: @@ -224,7 +197,7 @@ We add the following function to `xapi/xapi_host.ml`: let price_of ~__context ~host ~item = if item = "fish" then 3.14 else 0.00 - + We also need to add the function to the interface `xapi/xapi_host.mli`: val price_of : From 29685fb2c388f562ac337f32e885667ab33a57cc Mon Sep 17 00:00:00 2001 From: Benjamin Reis Date: Tue, 24 Sep 2024 13:59:59 +0200 Subject: [PATCH 042/141] Do not assume IPv4 when disabling management Instead look at the inventory to decide the address type, this will avoid to override the value in the inventory when its rewritten Signed-off-by: Benjamin Reis --- ocaml/xapi/xapi_host.ml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index aa2f07e2fba..e8162430943 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -1303,7 +1303,11 @@ let management_disable ~__context = raise (Api_errors.Server_error (Api_errors.slave_requires_management_iface, [])) ; (* Reset the management server *) - Xapi_mgmt_iface.change "" `IPv4 ; + let management_address_type = + Record_util.primary_address_type_of_string + Xapi_inventory.(lookup _management_address_type) + in + Xapi_mgmt_iface.change "" management_address_type ; Xapi_mgmt_iface.run ~__context ~mgmt_enabled:false () ; (* Make sure all my PIFs are marked appropriately *) Xapi_pif.update_management_flags ~__context From 9e6e0155dd44a9d0e724fe257095159d529db664 Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 24 Sep 2024 13:42:06 +0100 Subject: [PATCH 043/141] CP-51034: Update datamodel_lifecycle.ml Signed-off-by: Colin James --- ocaml/idl/datamodel_lifecycle.ml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ocaml/idl/datamodel_lifecycle.ml b/ocaml/idl/datamodel_lifecycle.ml index bcd67b50acb..c16a4374342 100644 --- a/ocaml/idl/datamodel_lifecycle.ml +++ b/ocaml/idl/datamodel_lifecycle.ml @@ -95,6 +95,12 @@ let prototyped_of_field = function Some "23.9.0" | "pool", "telemetry_uuid" -> Some "23.9.0" + | "pool", "ext_auth_cache_expiry" -> + Some "24.30.0-next" + | "pool", "ext_auth_cache_size" -> + Some "24.30.0-next" + | "pool", "ext_auth_cache_enabled" -> + Some "24.30.0-next" | "pool", "ext_auth_max_threads" -> Some "23.27.0" | "pool", "local_auth_max_threads" -> @@ -163,6 +169,12 @@ let prototyped_of_message = function Some "24.19.1" | "pool", "get_guest_secureboot_readiness" -> Some "24.17.0" + | "pool", "set_ext_auth_cache_expiry" -> + Some "24.30.0-next" + 
| "pool", "set_ext_auth_cache_size" -> + Some "24.30.0-next" + | "pool", "set_ext_auth_cache_enabled" -> + Some "24.30.0-next" | "pool", "set_ext_auth_max_threads" -> Some "23.27.0" | "pool", "set_local_auth_max_threads" -> From 84667338b32bc403c16b5980759bd3494eb9477f Mon Sep 17 00:00:00 2001 From: Benjamin Reis Date: Tue, 24 Sep 2024 14:09:18 +0200 Subject: [PATCH 044/141] Write IPv6 management config as well when ejecting an host Until now only the IPv4 config was written and an IPv6 ejected host would loose its config after restarting in its own pool. Signed-off-by: Benjamin Reis --- ocaml/xapi/xapi_pool.ml | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 39b5dbd447b..ef79c86cad1 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -1892,6 +1892,11 @@ let eject_self ~__context ~host = | `Static -> "static" in + let mode_v6 = + Record_util.ipv6_configuration_mode_to_string + pif.API.pIF_ipv6_configuration_mode + |> String.uncapitalize_ascii + in let write_first_boot_management_interface_configuration_file () = (* During firstboot, now inventory has an empty MANAGEMENT_INTERFACE *) let bridge = "" in @@ -1905,7 +1910,11 @@ let eject_self ~__context ~host = (* If the management_interface exists on a vlan, write the vlan id into management.conf *) let vlan_id = Int64.to_int pif.API.pIF_VLAN in let config_base = - [sprintf "LABEL='%s'" management_device; sprintf "MODE='%s'" mode] + [ + sprintf "LABEL='%s'" management_device + ; sprintf "MODE='%s'" mode + ; sprintf "MODEV6='%s'" mode_v6 + ] in let config_static = if mode <> "static" then @@ -1915,9 +1924,23 @@ let eject_self ~__context ~host = sprintf "IP='%s'" pif.API.pIF_IP ; sprintf "NETMASK='%s'" pif.API.pIF_netmask ; sprintf "GATEWAY='%s'" pif.API.pIF_gateway - ; sprintf "DNS='%s'" pif.API.pIF_DNS ] in + let configv6_static = + if mode_v6 <> "static" then + [] + else + [ + sprintf "IPv6='%s'" (String.concat "," pif.API.pIF_IPv6) + ; sprintf "IPv6_GATEWAY='%s'" pif.API.pIF_ipv6_gateway + ] + in + let config_dns = + if mode = "static" || mode_v6 = "static" then + [sprintf "DNS='%s'" pif.API.pIF_DNS] + else + [] + in let config_vlan = if vlan_id = -1 then [] @@ -1925,7 +1948,8 @@ let eject_self ~__context ~host = [sprintf "VLAN='%d'" vlan_id] in let configuration_file = - List.concat [config_base; config_static; config_vlan] + List.concat + [config_base; config_static; configv6_static; config_dns; config_vlan] |> String.concat "\n" in Unixext.write_string_to_file From fae9b559cf40306a2a0a6ac2a11e10a4c7f94572 Mon Sep 17 00:00:00 2001 From: Colin James Date: Wed, 18 Sep 2024 08:52:56 +0100 Subject: [PATCH 045/141] CP-51574: Add explicit reentrant lock to Db_lock Update Db_lock to use an explicit reentrant (recursive) mutex pattern. This is a pretty standard implementation. The way it works is that threads compete to become the "holder" of the lock. The current holder of the lock is identified using thread identifiers (integers) and read/written from/to atomically (by way of Atomic.t). If attempting to acquire the lock (i.e. atomically compare_and_set the holder, with the expected current value as physically equal to None) fails, then the thread begins to wait. Waiting threads wait using a condition variable to avoid busy waiting. 
The lock can be acquired several times after it is acquired by the calling thread, but releasing it must ensure that every lock is matched by a corresponding unlock (the thread must relinquish every "hold" it has on the lock). Upon successfully releasing its holds on the lock, the thread that is relinquishing control resets the holder (to None) and uses the condition variable to signal a waiting thread to wakeup (and attempt to become the holder). To make this pattern safe and not expose too many details, an interface file (db_lock.mli) is introduced. This interface does not expose the details of the underlying lock, but rather the single function: val with_lock : (unit -> 'a) -> 'a which ensures the evaluation of its argument is properly sandwiched between code that correctly acquires and releases the lock (taking care to avoid holding onto the lock during exceptional circumstances, e.g. by way of an exception that is unhandled). Code written to use the database lock currently only use this interface to acquire the Db_lock, so no other changes are required. A follow-up change may be to enforce the same usage pattern for the global database flush lock, which is seemingly only used in the same way. Signed-off-by: Colin James --- ocaml/database/db_lock.ml | 168 +++++++++++++++++++++++++-------- ocaml/database/db_lock.mli | 24 +++++ ocaml/database/dune | 3 + ocaml/xapi/xapi_diagnostics.ml | 11 ++- quality-gate.sh | 2 +- 5 files changed, 161 insertions(+), 47 deletions(-) create mode 100644 ocaml/database/db_lock.mli diff --git a/ocaml/database/db_lock.ml b/ocaml/database/db_lock.ml index 2c149fca804..3b752dd5f39 100644 --- a/ocaml/database/db_lock.ml +++ b/ocaml/database/db_lock.ml @@ -11,60 +11,146 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(* Lock shared between client/slave implementations *) -open Xapi_stdext_pervasives.Pervasiveext +module type REENTRANT_LOCK = sig + type t -(* Withlock takes dbcache_mutex, and ref-counts to allow the same thread to re-enter without blocking as many times - as it wants. *) -let dbcache_mutex = Mutex.create () + (** Timing statistics modified by each thread after the lock is + initially acquired. *) + type statistics = { + mutable max_time: float + ; mutable min_time: float + ; mutable total_time: float + ; mutable acquires: int + } -let time = ref 0.0 + val create : unit -> t + (** Creates an instance of a reentrant lock. *) -let n = ref 0 + val lock : t -> unit + (** [lock l] acquires the lock [l]. If the calling thread already + holds the lock, the implementation internally increases the number + of "holds" the thread has on the lock. Each call to [lock] must + have a corresponding call to [unlock] or else it is an error. *) -let maxtime = ref neg_infinity + val unlock : t -> unit + (** [unlock l] releases a hold on the lock. If the hold count + becomes 0, the lock is free to be acquired by other threads. It is + an error to call this from a thread that does not hold the lock. *) -let mintime = ref infinity + val statistics : t -> statistics + (** Returns a copy of the internal timing statistics maintained by + the implementation. Calling this has the effect of temporarily + acquiring the lock, as only the lock holder can read or modify the + internal record. *) +end -let thread_reenter_count = ref 0 +(** A simple re-entrant lock (recursive mutex). 
*) +module ReentrantLock : REENTRANT_LOCK = struct + type tid = int -let allow_thread_through_dbcache_mutex = ref None + type statistics = { + mutable max_time: float + ; mutable min_time: float + ; mutable total_time: float + ; mutable acquires: int + } -let with_lock f = - let me = Thread.id (Thread.self ()) in - let do_with_lock () = - let now = Unix.gettimeofday () in - Mutex.lock dbcache_mutex ; - let now2 = Unix.gettimeofday () in - let delta = now2 -. now in - time := !time +. delta ; - n := !n + 1 ; - maxtime := max !maxtime delta ; - mintime := min !mintime delta ; - allow_thread_through_dbcache_mutex := Some me ; - thread_reenter_count := 1 ; - finally f (fun () -> - thread_reenter_count := !thread_reenter_count - 1 ; - if !thread_reenter_count = 0 then ( - allow_thread_through_dbcache_mutex := None ; - Mutex.unlock dbcache_mutex + type t = { + holder: tid option Atomic.t (* The holder of the lock *) + ; mutable holds: int (* How many holds the holder has on the lock *) + ; lock: Mutex.t (* Barrier to signal waiting threads *) + ; condition: Condition.t + (* Waiting threads are signalled via this condition to reattempt to acquire the lock *) + ; statistics: statistics (* Bookkeeping of time taken to acquire lock *) + } + + let create_statistics () = + {max_time= neg_infinity; min_time= infinity; total_time= 0.; acquires= 0} + + let create () = + { + holder= Atomic.make None + ; holds= 0 + ; lock= Mutex.create () + ; condition= Condition.create () + ; statistics= create_statistics () + } + + let current_tid () = Thread.(self () |> id) + + let lock l = + let me = current_tid () in + match Atomic.get l.holder with + | Some tid when tid = me -> + l.holds <- l.holds + 1 + | _ -> + let intended = Some me in + let counter = Mtime_clock.counter () in + Mutex.lock l.lock ; + while not (Atomic.compare_and_set l.holder None intended) do + Condition.wait l.condition l.lock + done ; + let stats = l.statistics in + let delta = Clock.Timer.span_to_s (Mtime_clock.count counter) in + stats.total_time <- stats.total_time +. delta ; + stats.min_time <- Float.min delta stats.min_time ; + stats.max_time <- Float.max delta stats.max_time ; + stats.acquires <- stats.acquires + 1 ; + Mutex.unlock l.lock ; + l.holds <- 1 + + let unlock l = + let me = current_tid () in + match Atomic.get l.holder with + | Some tid when tid = me -> + l.holds <- l.holds - 1 ; + if l.holds = 0 then ( + let () = Atomic.set l.holder None in + Mutex.lock l.lock ; + Condition.signal l.condition ; + Mutex.unlock l.lock ) - ) - in - match !allow_thread_through_dbcache_mutex with - | None -> - do_with_lock () - | Some id -> - if id = me then ( - thread_reenter_count := !thread_reenter_count + 1 ; - finally f (fun () -> thread_reenter_count := !thread_reenter_count - 1) - ) else - do_with_lock () + | _ -> + failwith + (Printf.sprintf "%s: Calling thread does not hold the lock!" + __MODULE__ + ) + + let statistics l = + lock l ; + let stats = + (* Force a deep copy of the mutable fields *) + let ({acquires; _} as original) = l.statistics in + {original with acquires} + in + unlock l ; stats +end + +(* The top-level database lock that writers must acquire. *) +let db_lock = ReentrantLock.create () (* Global flush lock: all db flushes are performed holding this lock *) (* When we want to prevent the database from being flushed for a period (e.g. when doing a host backup in the OEM product) then we acquire this lock *) let global_flush_mutex = Mutex.create () -let report () = (!n, !time /. 
float_of_int !n, !mintime, !maxtime) +let with_lock f = + let open Xapi_stdext_pervasives.Pervasiveext in + ReentrantLock.( + lock db_lock ; + finally f (fun () -> unlock db_lock) + ) + +type report = {count: int; avg_time: float; min_time: float; max_time: float} + +let report () = + let ReentrantLock.{max_time; min_time; total_time; acquires} = + ReentrantLock.statistics db_lock + in + { + count= acquires + ; avg_time= total_time /. float_of_int acquires + ; min_time + ; max_time + } diff --git a/ocaml/database/db_lock.mli b/ocaml/database/db_lock.mli new file mode 100644 index 00000000000..0771a944eff --- /dev/null +++ b/ocaml/database/db_lock.mli @@ -0,0 +1,24 @@ +(* + * Copyright (c) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +val global_flush_mutex : Mutex.t + +val with_lock : (unit -> 'a) -> 'a +(** [with_lock f] executes [f] in a context where the calling thread + holds the database lock. It is safe to nest such calls as the + underlying lock is reentrant (a recursive mutex). *) + +type report = {count: int; avg_time: float; min_time: float; max_time: float} + +val report : unit -> report diff --git a/ocaml/database/dune b/ocaml/database/dune index 14ac44931bd..5c2e500a399 100644 --- a/ocaml/database/dune +++ b/ocaml/database/dune @@ -27,6 +27,9 @@ (libraries forkexec gzip + mtime + mtime.clock.os + clock rpclib.core rpclib.json safe-resources diff --git a/ocaml/xapi/xapi_diagnostics.ml b/ocaml/xapi/xapi_diagnostics.ml index c765867a987..ee67dc34b13 100644 --- a/ocaml/xapi/xapi_diagnostics.ml +++ b/ocaml/xapi/xapi_diagnostics.ml @@ -36,12 +36,13 @@ let gc_stats ~__context ~host:_ = let db_stats ~__context = (* Use Printf.sprintf to keep format *) - let n, avgtime, min, max = Xapi_database.Db_lock.report () in + let open Xapi_database in + let Db_lock.{count; avg_time; min_time; max_time} = Db_lock.report () in [ - ("n", Printf.sprintf "%d" n) - ; ("avgtime", Printf.sprintf "%f" avgtime) - ; ("min", Printf.sprintf "%f" min) - ; ("max", Printf.sprintf "%f" max) + ("n", Printf.sprintf "%d" count) + ; ("avgtime", Printf.sprintf "%f" avg_time) + ; ("min", Printf.sprintf "%f" min_time) + ; ("max", Printf.sprintf "%f" max_time) ] let network_stats ~__context ~host:_ ~params = diff --git a/quality-gate.sh b/quality-gate.sh index 8e5a6ce8c26..47e97fa37e2 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=510 + N=509 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) From 7e34b3ae41e10180a52b622136dcfa8d2f7f8418 Mon Sep 17 00:00:00 2001 From: Ming Lu Date: Thu, 26 Sep 2024 15:54:25 +0800 Subject: [PATCH 046/141] CA-399638: The livepatches are absent in the 
response of /updates When applying livepatches, for one component, only the latest one will be applied. This is because the latest livepatch will always roll up all the previous ones if they are of the same component and base build ID. The bug to be fixed in this commit is that the previous livepatches with the same build ID are not returned in the response of the query on the /updates HTTP endpoint. Signed-off-by: Ming Lu --- ocaml/xapi/repository_helpers.ml | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/ocaml/xapi/repository_helpers.ml b/ocaml/xapi/repository_helpers.ml index 8a184e52f4c..51699612739 100644 --- a/ocaml/xapi/repository_helpers.ml +++ b/ocaml/xapi/repository_helpers.ml @@ -1298,12 +1298,22 @@ let with_access_token ~token ~token_id f = let msg = Printf.sprintf "%s: The token or token_id is empty" __LOC__ in raise Api_errors.(Server_error (internal_error, [msg])) -let prune_updateinfo_for_livepatches livepatches updateinfo = - let open UpdateInfo in - let lps = - List.filter (fun x -> LivePatchSet.mem x livepatches) updateinfo.livepatches +let prune_updateinfo_for_livepatches latest_lps updateinfo = + let livepatches = + let open LivePatch in + (* Keep a livepatch if it is rolled up by one of the latest livepatches. + * The latest livepatches are the ones to be applied actually. + *) + updateinfo.UpdateInfo.livepatches + |> List.filter (fun lp -> + let is_rolled_up_by latest = + latest.component = lp.component + && latest.base_build_id = lp.base_build_id + in + LivePatchSet.exists is_rolled_up_by latest_lps + ) in - {updateinfo with livepatches= lps} + {updateinfo with livepatches} let do_with_host_pending_guidances ~op guidances = List.iter From 9db858a13257b4b19b37bfc0a09276909f8a77a2 Mon Sep 17 00:00:00 2001 From: Ming Lu Date: Thu, 26 Sep 2024 19:04:29 +0800 Subject: [PATCH 047/141] CA-399638: Add unit tests Signed-off-by: Ming Lu --- ocaml/tests/test_repository_helpers.ml | 30 +++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/ocaml/tests/test_repository_helpers.ml b/ocaml/tests/test_repository_helpers.ml index dbb5b7f1a42..775c7635665 100644 --- a/ocaml/tests/test_repository_helpers.ml +++ b/ocaml/tests/test_repository_helpers.ml @@ -3881,7 +3881,29 @@ module PruneUpdateInfoForLivepatches = Generic.MakeStateless (struct ; base_build_id= "2cc28689364587682593b6a72e2a586d29996bb9" ; base_version= "4.19.19" ; base_release= "8.0.20.xs8" - ; to_version= "4.13.4" + ; to_version= "4.19.19" + ; to_release= "8.0.21.xs8" + } + + let lp2 = + LivePatch. + { + component= Livepatch.Kernel + ; base_build_id= "2cc28689364587682593b6a72e2a586d29996bb9" + ; base_version= "4.19.19" + ; base_release= "8.0.20.xs8" + ; to_version= "4.19.20" + ; to_release= "8.0.21.xs8" + } + + let lp3 = + LivePatch. 
+ { + component= Livepatch.Kernel + ; base_build_id= "4cc28689364587682593b6a72e2a586d29996bb9" + ; base_version= "4.19.20" + ; base_release= "7.0.20.xs8" + ; to_version= "4.13.5" ; to_release= "8.0.21.xs8" } @@ -3915,6 +3937,12 @@ module PruneUpdateInfoForLivepatches = Generic.MakeStateless (struct ; ( ([], {updateinfo with livepatches= [lp0; lp1]}) , {updateinfo with livepatches= []} ) + ; ( ([lp0; lp2], {updateinfo with livepatches= [lp0; lp1; lp2; lp3]}) + , {updateinfo with livepatches= [lp0; lp1; lp2]} + ) + ; ( ([lp0], {updateinfo with livepatches= [lp0; lp1; lp2; lp3]}) + , {updateinfo with livepatches= [lp0]} + ) ] end) From 0693477fc6e813938b9db97de7bd4097e4e5973a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Thu, 26 Sep 2024 15:04:58 +0100 Subject: [PATCH 048/141] CP-49135: add Uuid generation benchmark MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We generate uuids frequently, e.g. when creating sessions. Signed-off-by: Edwin Török --- ocaml/tests/bench/bench_uuid.ml | 10 ++++++++++ ocaml/tests/bench/dune | 6 +++--- 2 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 ocaml/tests/bench/bench_uuid.ml diff --git a/ocaml/tests/bench/bench_uuid.ml b/ocaml/tests/bench/bench_uuid.ml new file mode 100644 index 00000000000..f13118e48db --- /dev/null +++ b/ocaml/tests/bench/bench_uuid.ml @@ -0,0 +1,10 @@ +open Bechamel + +let benchmarks = + Test.make_grouped ~name:"uuidx creation" + [ + Test.make ~name:"Uuidx.make_uuid_urnd" (Staged.stage Uuidx.make_uuid_urnd) + ; Test.make ~name:"Uuidx.make" (Staged.stage Uuidx.make) + ] + +let () = Bechamel_simple_cli.cli benchmarks diff --git a/ocaml/tests/bench/dune b/ocaml/tests/bench/dune index 0d11700e285..dcd61813e1e 100644 --- a/ocaml/tests/bench/dune +++ b/ocaml/tests/bench/dune @@ -1,4 +1,4 @@ -(executable - (name bench_tracing) - (libraries tracing bechamel bechamel-notty notty.unix tracing_export threads.posix fmt notty) +(executables + (names bench_tracing bench_uuid) + (libraries tracing bechamel bechamel-notty notty.unix tracing_export threads.posix fmt notty uuid) ) From a0176da7352f573448e3178df81172af6394f34d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 24 Apr 2024 16:23:48 +0100 Subject: [PATCH 049/141] CP-49135: open /dev/urandom just once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduces the amount of file descriptors kept open, and avoids 2 syscalls when creating sessions. Creating session uuids/refs is ~3.8x faster now: Before: ``` uuidx creation/Uuidx.make_uuid_urnd (ns): { monotonic-clock per run = 5224.741331 (confidence: 5263.321869 to 5187.101530); r² = Some 0.997606 } ``` After: ``` uuidx creation/Uuidx.make_uuid_urnd (ns): { monotonic-clock per run = 1369.962560 (confidence: 1378.331674 to 1362.496608); r² = Some 0.998731 } ``` Adjust forkexecd test, where the called program is an OCaml program linked with Uuidx, and thus has 1 extra FD open (that it didn't receive/inherit from the parent). 
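The essence of the uuidx.ml change, as a standalone sketch (the exact diff follows below): pay for open(2) once at start-up and reuse the descriptor for every read.

```
(* Opened once; deliberately never closed, so it lives for the whole process. *)
let dev_urandom_fd = Unix.openfile "/dev/urandom" [Unix.O_RDONLY] 0o640

(* Read exactly [n] bytes of entropy from the persistent descriptor. *)
let read_random n =
  let buf = Bytes.create n in
  if Unix.read dev_urandom_fd buf 0 n <> n then raise End_of_file ;
  Bytes.to_string buf

let _uuid_bytes () = read_random 16
```

Because the descriptor stays open (the patch notes it cannot simply be closed in at_exit), the expected file-descriptor count in fe_test.ml grows by one, as adjusted below.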
Signed-off-by: Edwin Török --- ocaml/forkexecd/test/fe_test.ml | 2 +- ocaml/libs/uuid/uuidx.ml | 34 ++++++++++++++------------------- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/ocaml/forkexecd/test/fe_test.ml b/ocaml/forkexecd/test/fe_test.ml index 870ac591601..1c5e46bc1f9 100644 --- a/ocaml/forkexecd/test/fe_test.ml +++ b/ocaml/forkexecd/test/fe_test.ml @@ -292,7 +292,7 @@ let slave = function (* Printf.fprintf stderr "%s %d\n" total_fds (List.length present - 1) *) - if total_fds <> List.length filtered then + if total_fds + 1 (* Uuid.dev_urandom *) <> List.length filtered then fail "Expected %d fds; /proc/self/fd has %d: %s" total_fds (List.length filtered) ls diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index 01dbda46899..2a86cab723a 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -38,26 +38,20 @@ let is_uuid str = match of_string str with None -> false | Some _ -> true let dev_urandom = "/dev/urandom" +let dev_urandom_fd = Unix.openfile dev_urandom [Unix.O_RDONLY] 0o640 +(* we can't close this in at_exit, because Crowbar runs at_exit, and + it'll fail because this FD will then be closed +*) + let read_bytes dev n = - let fd = Unix.openfile dev [Unix.O_RDONLY] 0o640 in - let finally body_f clean_f = - try - let ret = body_f () in - clean_f () ; ret - with e -> clean_f () ; raise e - in - finally - (fun () -> - let buf = Bytes.create n in - let read = Unix.read fd buf 0 n in - if read <> n then - raise End_of_file - else - Bytes.to_string buf - ) - (fun () -> Unix.close fd) - -let make_uuid_urnd () = of_bytes (read_bytes dev_urandom 16) |> Option.get + let buf = Bytes.create n in + let read = Unix.read dev buf 0 n in + if read <> n then + raise End_of_file + else + Bytes.to_string buf + +let make_uuid_urnd () = of_bytes (read_bytes dev_urandom_fd 16) |> Option.get (* Use the CSPRNG-backed urandom *) let make = make_uuid_urnd @@ -65,7 +59,7 @@ let make = make_uuid_urnd type cookie = string let make_cookie () = - read_bytes dev_urandom 64 + read_bytes dev_urandom_fd 64 |> String.to_seq |> Seq.map (fun c -> Printf.sprintf "%1x" (int_of_char c)) |> List.of_seq From 2290c51b0ca87df23c850679408ea8f517bd2bd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 27 Sep 2024 16:01:59 +0100 Subject: [PATCH 050/141] CP-49135: move rpc conversion functions into proper Ref module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will allow restricting which refs can be created from strings, without breaking RPC (the RPC code inside ref.ml will have access to the string conversion functions, but external users can get a more restricted interface that e.g. wouldn't allow constructing secret Refs from arbitrary strings) No functional change. 
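As an illustration of the kind of restricted interface this enables (a hypothetical module, not the xapi-types code; the counter-based make is purely illustrative): a signature can keep exporting the Rpc converters while hiding the raw string constructor.

```
module Restricted_ref : sig
  type t

  val make : unit -> t (* the only public way to mint a reference *)

  val rpc_of_t : t -> Rpc.t (* serialisation remains available *)

  val t_of_rpc : Rpc.t -> t
end = struct
  type t = string

  let counter = ref 0

  let make () = incr counter ; Printf.sprintf "OpaqueRef:%d" !counter

  let rpc_of_t x = Rpc.rpc_of_string x

  let t_of_rpc r = Rpc.string_of_rpc r
end
```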
Signed-off-by: Edwin Török --- ocaml/idl/ocaml_backend/gen_api.ml | 10 +--------- ocaml/xapi-types/ref.ml | 4 ++++ ocaml/xapi-types/ref.mli | 4 ++++ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ocaml/idl/ocaml_backend/gen_api.ml b/ocaml/idl/ocaml_backend/gen_api.ml index 5b18d603f4e..1caf9eee138 100644 --- a/ocaml/idl/ocaml_backend/gen_api.ml +++ b/ocaml/idl/ocaml_backend/gen_api.ml @@ -400,15 +400,7 @@ let gen_client_types highapi = ; " Rpc.failure (rpc_of_failure ([\"Fault\"; code]))" ] ; ["include Rpc"; "type string_list = string list [@@deriving rpc]"] - ; [ - "module Ref = struct" - ; " include Ref" - ; " let rpc_of_t (_:'a -> Rpc.t) (x: 'a Ref.t) = rpc_of_string \ - (Ref.string_of x)" - ; " let t_of_rpc (_:Rpc.t -> 'a) x : 'a t = of_string (string_of_rpc \ - x);" - ; "end" - ] + ; ["module Ref = Ref"] ; [ "module Date = struct" ; " open Xapi_stdext_date" diff --git a/ocaml/xapi-types/ref.ml b/ocaml/xapi-types/ref.ml index 32e60c1a2fc..32603c92faf 100644 --- a/ocaml/xapi-types/ref.ml +++ b/ocaml/xapi-types/ref.ml @@ -138,3 +138,7 @@ let really_pretty_and_small x = "NULL" let pp ppf x = Format.fprintf ppf "%s" (string_of x) + +let rpc_of_t _ x = Rpc.rpc_of_string (string_of x) + +let t_of_rpc _ x = of_string (Rpc.string_of_rpc x) diff --git a/ocaml/xapi-types/ref.mli b/ocaml/xapi-types/ref.mli index b61243266d1..ce3f2a797ed 100644 --- a/ocaml/xapi-types/ref.mli +++ b/ocaml/xapi-types/ref.mli @@ -14,6 +14,10 @@ type 'a t +val rpc_of_t : ('a -> Rpc.t) -> 'a t -> Rpc.t + +val t_of_rpc : (Rpc.t -> 'a) -> Rpc.t -> 'a t + val ref_prefix : string val make : unit -> 'a t From e0f9d88edd3aebdccaedbdd7c12c4285b2a86a9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 27 Sep 2024 16:05:19 +0100 Subject: [PATCH 051/141] CP-49135: be consistent in polymorphic type parameter spelling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce an `all` type that contains all possible type parameters. This'll ensure that spelling is consistent, e.g. we've got both VM and Vm, and both Session and session currently. It also prepares for using separate types for secret and non-secret refs/uuids. Fix type signatures: * some signatures just specified 'a Ref.t, these must now be explicit * some signatures specified `API.ref_task Uuidx.t`, instead of `task Uuidx.t`, this is fixed * spelling of VM, Vm, Session, session, Console, console, Crashdump, crashdump, User, user, PBD, pbd is made consistent * test code must chose a concrete type, cannot leave the parameter abstract, use Generic No functional change. 
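A small self-contained illustration of what the [< all] constraint buys (the tag set here is trimmed, not the real list): a misspelt tag is now a compile-time error rather than a silently distinct type.

```
type all = [`VM | `host | `session]

type 'a typed_ref = Ref of string constraint 'a = [< all]

let _ok : [`VM] typed_ref = Ref "OpaqueRef:example"

(* let _bad : [`Vm] typed_ref = Ref "OpaqueRef:example"
   ...is rejected, because `Vm is not part of [all]. *)
```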
Signed-off-by: Edwin Török --- ocaml/libs/uuid/uuid_test.ml | 2 +- ocaml/libs/uuid/uuidx.ml | 75 ++++++++++++++++++++++++++++- ocaml/libs/uuid/uuidx.mli | 80 ++++++++++++++++++++++++++++++- ocaml/mpathalert/mpathalert.ml | 2 +- ocaml/tests/test_ref.ml | 2 +- ocaml/xapi-client/event_helper.ml | 10 ++-- ocaml/xapi-types/ref.ml | 3 ++ ocaml/xapi-types/ref.mli | 4 +- ocaml/xapi/context.mli | 2 +- ocaml/xapi/taskHelper.ml | 2 +- ocaml/xapi/xapi_clustering.mli | 15 +++--- ocaml/xapi/xapi_pif.mli | 2 +- ocaml/xapi/xapi_vdi.mli | 10 ++-- ocaml/xapi/xapi_vif_helpers.mli | 2 +- ocaml/xenopsd/xc/domain.mli | 4 +- 15 files changed, 186 insertions(+), 29 deletions(-) diff --git a/ocaml/libs/uuid/uuid_test.ml b/ocaml/libs/uuid/uuid_test.ml index dbaf294545f..127f10b5824 100644 --- a/ocaml/libs/uuid/uuid_test.ml +++ b/ocaml/libs/uuid/uuid_test.ml @@ -25,7 +25,7 @@ let uuid_arrays = let non_uuid_arrays = [[|0|]; [|0; 1; 2; 3; 4; 5; 6; 7; 8; 9; 10; 11; 12; 13; 14|]] -type resource +type resource = [`Generic] let uuid_testable : (module Alcotest.TESTABLE with type t = resource Uuidx.t) = Alcotest.testable Uuidx.pp Uuidx.equal diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index 2a86cab723a..d229ad25909 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -12,7 +12,80 @@ * GNU Lesser General Public License for more details. *) -type 'a t = Uuidm.t +type all = + [ `auth + | `blob + | `Bond + | `Certificate + | `Cluster + | `Cluster_host + | `console + | `crashdump + | `data_source + | `Diagnostics + | `DR_task + | `event + | `Feature + | `generation + | `Generic + | `GPU_group + | `host + | `host_cpu + | `host_crashdump + | `host_metrics + | `host_patch + | `LVHD + | `message + | `network + | `network_sriov + | `Observer + | `PBD + | `PCI + | `PGPU + | `PIF + | `PIF_metrics + | `pool + | `pool_patch + | `pool_update + | `probe_result + | `PUSB + | `PVS_cache_storage + | `PVS_proxy + | `PVS_server + | `PVS_site + | `Repository + | `role + | `SDN_controller + | `secret + | `session + | `SM + | `SR + | `sr_stat + | `subject + | `task + | `tunnel + | `USB_group + | `user + | `VBD + | `VBD_metrics + | `VDI + | `vdi_nbd_server_info + | `VGPU + | `VGPU_type + | `VIF + | `VIF_metrics + | `VLAN + | `VM + | `VM_appliance + | `VM_group + | `VM_guest_metrics + | `VM_metrics + | `VMPP + | `VMSS + | `VTPM + | `VUSB ] + +type 'a t = Uuidm.t constraint 'a = [< all] let null = Uuidm.nil diff --git a/ocaml/libs/uuid/uuidx.mli b/ocaml/libs/uuid/uuidx.mli index 618235b4ae6..d5658430622 100644 --- a/ocaml/libs/uuid/uuidx.mli +++ b/ocaml/libs/uuid/uuidx.mli @@ -22,9 +22,85 @@ Also, cookies aren't UUIDs and should be put somewhere else. 
*) +(** all object classes supported by XAPI *) +type all = + [ `auth + | `blob + | `Bond + | `Certificate + | `Cluster + | `Cluster_host + | `console + | `crashdump + | `data_source + | `Diagnostics + | `DR_task + | `event + | `Feature + | `generation + | `Generic + | `GPU_group + | `host + | `host_cpu + | `host_crashdump + | `host_metrics + | `host_patch + | `LVHD + | `message + | `network + | `network_sriov + | `Observer + | `PBD + | `PCI + | `PGPU + | `PIF + | `PIF_metrics + | `pool + | `pool_patch + | `pool_update + | `probe_result + | `PUSB + | `PVS_cache_storage + | `PVS_proxy + | `PVS_server + | `PVS_site + | `Repository + | `role + | `SDN_controller + | `secret + | `session + | `SM + | `SR + | `sr_stat + | `subject + | `task + | `tunnel + | `USB_group + | `user + | `VBD + | `VBD_metrics + | `VDI + | `vdi_nbd_server_info + | `VGPU + | `VGPU_type + | `VIF + | `VIF_metrics + | `VLAN + | `VM + | `VM_appliance + | `VM_group + | `VM_guest_metrics + | `VM_metrics + | `VMPP + | `VMSS + | `VTPM + | `VUSB ] + (** A 128-bit UUID to identify an object of class 'a. For example the UUID of - a host has the type ([\[`host\] Uuidx.t]). *) -type 'a t + a host has the type ([\[`host\] Uuidx.t]). + The type parameter is one of {!type:all} + *) +type 'a t = Uuidm.t constraint 'a = [< all] val null : 'a t (** A null UUID, as if such a thing actually existed. It turns out to be diff --git a/ocaml/mpathalert/mpathalert.ml b/ocaml/mpathalert/mpathalert.ml index c236f602702..bea5ae2ee0a 100644 --- a/ocaml/mpathalert/mpathalert.ml +++ b/ocaml/mpathalert/mpathalert.ml @@ -53,7 +53,7 @@ let debug (fmt : ('a, unit, string, unit) format4) = type t = { host: [`host] Uuidx.t ; host_name: string - ; pbd: [`pbd] Uuidx.t + ; pbd: [`PBD] Uuidx.t ; timestamp: float ; scsi_id: string ; current: int diff --git a/ocaml/tests/test_ref.ml b/ocaml/tests/test_ref.ml index 401746c0690..7213e615e3f 100644 --- a/ocaml/tests/test_ref.ml +++ b/ocaml/tests/test_ref.ml @@ -9,7 +9,7 @@ let uuidm = let ref_of_uuidm uuidm = Ref.ref_prefix ^ (uuidm |> Uuidm.to_string) |> Ref.of_string -type arg +type arg = [`Generic] type t = arg Ref.t diff --git a/ocaml/xapi-client/event_helper.ml b/ocaml/xapi-client/event_helper.ml index 10ef0db12ab..453d2cc2c8d 100644 --- a/ocaml/xapi-client/event_helper.ml +++ b/ocaml/xapi-client/event_helper.ml @@ -13,9 +13,9 @@ *) type event_record = - | Session of [`Session] Ref.t * API.session_t option + | Session of [`session] Ref.t * API.session_t option | Task of [`task] Ref.t * API.task_t option - | Event of [`Event] Ref.t * API.event_t option + | Event of [`event] Ref.t * API.event_t option | VM of [`VM] Ref.t * API.vM_t option | VM_metrics of [`VM_metrics] Ref.t * API.vM_metrics_t option | VM_guest_metrics of @@ -33,10 +33,10 @@ type event_record = | VBD of [`VBD] Ref.t * API.vBD_t option | VBD_metrics of [`VBD_metrics] Ref.t * API.vBD_metrics_t option | PBD of [`PBD] Ref.t * API.pBD_t option - | Crashdump of [`Crashdump] Ref.t * API.crashdump_t option + | Crashdump of [`crashdump] Ref.t * API.crashdump_t option | VTPM of [`VTPM] Ref.t * API.vTPM_t option - | Console of [`Console] Ref.t * API.console_t option - | User of [`User] Ref.t * API.user_t option + | Console of [`console] Ref.t * API.console_t option + | User of [`user] Ref.t * API.user_t option | Pool of [`pool] Ref.t * API.pool_t option | Message of [`message] Ref.t * API.message_t option | Secret of [`secret] Ref.t * API.secret_t option diff --git a/ocaml/xapi-types/ref.ml b/ocaml/xapi-types/ref.ml index 32603c92faf..4b1be2c27d5 100644 --- 
a/ocaml/xapi-types/ref.ml +++ b/ocaml/xapi-types/ref.ml @@ -12,6 +12,8 @@ * GNU Lesser General Public License for more details. *) +type all = Uuidx.all + type 'a t = | Real of string (* ref to an object in the database *) @@ -20,6 +22,7 @@ type 'a t = | Other of string (* ref used for other purposes (it doesn't have one of the official prefixes) *) | Null + constraint 'a = [< all] (* ref to nothing at all *) diff --git a/ocaml/xapi-types/ref.mli b/ocaml/xapi-types/ref.mli index ce3f2a797ed..6e27078518f 100644 --- a/ocaml/xapi-types/ref.mli +++ b/ocaml/xapi-types/ref.mli @@ -12,7 +12,9 @@ * GNU Lesser General Public License for more details. *) -type 'a t +type all = Uuidx.all + +type 'a t constraint 'a = [< all] val rpc_of_t : ('a -> Rpc.t) -> 'a t -> Rpc.t diff --git a/ocaml/xapi/context.mli b/ocaml/xapi/context.mli index a501db213ff..98e04215272 100644 --- a/ocaml/xapi/context.mli +++ b/ocaml/xapi/context.mli @@ -124,7 +124,7 @@ val __make_task : -> ?session_id:API.ref_session -> ?subtask_of:API.ref_task -> string - -> API.ref_task * API.ref_task Uuidx.t + -> API.ref_task * [`task] Uuidx.t ) ref diff --git a/ocaml/xapi/taskHelper.ml b/ocaml/xapi/taskHelper.ml index 27e30ce3d39..30d36c0ed37 100644 --- a/ocaml/xapi/taskHelper.ml +++ b/ocaml/xapi/taskHelper.ml @@ -29,7 +29,7 @@ type t = API.ref_task (* creates a new task *) let make ~__context ~http_other_config ?(description = "") ?session_id - ?subtask_of label : t * t Uuidx.t = + ?subtask_of label : t * [`task] Uuidx.t = let@ __context = Context.with_tracing ~__context __FUNCTION__ in let uuid = Uuidx.make () in let uuid_str = Uuidx.to_string uuid in diff --git a/ocaml/xapi/xapi_clustering.mli b/ocaml/xapi/xapi_clustering.mli index 7fceae58118..746c538fa79 100644 --- a/ocaml/xapi/xapi_clustering.mli +++ b/ocaml/xapi/xapi_clustering.mli @@ -15,11 +15,14 @@ val set_ha_cluster_stack : __context:Context.t -> unit val with_clustering_lock : string -> (unit -> 'a) -> 'a val pif_of_host : - __context:Context.t -> API.ref_network -> API.ref_host -> 'a Ref.t * API.pIF_t + __context:Context.t + -> API.ref_network + -> API.ref_host + -> API.ref_PIF * API.pIF_t -val ip_of_pif : 'a Ref.t * API.pIF_t -> Cluster_interface.address +val ip_of_pif : API.ref_PIF * API.pIF_t -> Cluster_interface.address -val assert_pif_prerequisites : 'a Ref.t * API.pIF_t -> unit +val assert_pif_prerequisites : API.ref_PIF * API.pIF_t -> unit val assert_pif_attached_to : __context:Context.t -> host:[`host] Ref.t -> pIF:[`PIF] Ref.t -> unit @@ -27,7 +30,7 @@ val assert_pif_attached_to : val handle_error : Cluster_interface.error -> 'a val assert_cluster_host_can_be_created : - __context:Context.t -> host:'a Ref.t -> unit + __context:Context.t -> host:API.ref_host -> unit val get_required_cluster_stacks : __context:Context.t -> sr_sm_type:string -> string list @@ -41,7 +44,7 @@ val with_clustering_lock_if_cluster_exists : __context:Context.t -> string -> (unit -> 'a) -> 'a val find_cluster_host : - __context:Context.t -> host:[`host] Ref.t -> 'a Ref.t option + __context:Context.t -> host:[`host] Ref.t -> API.ref_Cluster_host option val get_network_internal : __context:Context.t -> self:[`Cluster] Ref.t -> [`network] Ref.t @@ -69,7 +72,7 @@ val rpc : __context:Context.t -> Rpc.call -> Rpc.response Idl.IdM.t val maybe_switch_cluster_stack_version : __context:Context.t - -> self:'a Ref.t + -> self:API.ref_Cluster_host -> cluster_stack:Cluster_interface.Cluster_stack.t -> unit diff --git a/ocaml/xapi/xapi_pif.mli b/ocaml/xapi/xapi_pif.mli index 07c3a85877c..6c83936c1aa 100644 
--- a/ocaml/xapi/xapi_pif.mli +++ b/ocaml/xapi/xapi_pif.mli @@ -247,7 +247,7 @@ val update_management_flags : __context:Context.t -> host:[`host] Ref.t -> unit * which holds the bridge of the management interface in the MANAGEMENT_INTERFACE field. *) val calculate_pifs_required_at_start_of_day : - __context:Context.t -> ('b Ref.t * API.pIF_t) list + __context:Context.t -> (API.ref_PIF * API.pIF_t) list (** Returns the set of PIF references + records which we want to be plugged in by the end of the start of day code. These are the PIFs on the localhost that are not bond slaves. For PIFs that have [disallow_unplug] set to true, and the management interface, will diff --git a/ocaml/xapi/xapi_vdi.mli b/ocaml/xapi/xapi_vdi.mli index ff3e5a9e0ec..0731a5f6082 100644 --- a/ocaml/xapi/xapi_vdi.mli +++ b/ocaml/xapi/xapi_vdi.mli @@ -22,8 +22,8 @@ val check_operation_error : __context:Context.t -> ?sr_records:'a list - -> ?pbd_records:('b API.Ref.t * API.pBD_t) list - -> ?vbd_records:('c API.Ref.t * Db_actions.vBD_t) list + -> ?pbd_records:(API.ref_PBD * API.pBD_t) list + -> ?vbd_records:(API.ref_VBD * Db_actions.vBD_t) list -> bool -> Db_actions.vDI_t -> API.ref_VDI @@ -39,8 +39,8 @@ val update_allowed_operations_internal : __context:Context.t -> self:[`VDI] API.Ref.t -> sr_records:'a list - -> pbd_records:('b API.Ref.t * API.pBD_t) list - -> ?vbd_records:('c API.Ref.t * Db_actions.vBD_t) list + -> pbd_records:(API.ref_PBD * API.pBD_t) list + -> ?vbd_records:(API.ref_VBD * Db_actions.vBD_t) list -> unit -> unit @@ -50,7 +50,7 @@ val update_allowed_operations : val cancel_tasks : __context:Context.t -> self:[`VDI] API.Ref.t - -> all_tasks_in_db:'a Ref.t list + -> all_tasks_in_db:API.ref_task list -> task_ids:string list -> unit diff --git a/ocaml/xapi/xapi_vif_helpers.mli b/ocaml/xapi/xapi_vif_helpers.mli index 0f3ef24955b..6451ba02ddc 100644 --- a/ocaml/xapi/xapi_vif_helpers.mli +++ b/ocaml/xapi/xapi_vif_helpers.mli @@ -25,7 +25,7 @@ val update_allowed_operations : __context:Context.t -> self:[`VIF] Ref.t -> unit val cancel_tasks : __context:Context.t -> self:[`VIF] Ref.t - -> all_tasks_in_db:'a Ref.t list + -> all_tasks_in_db:API.ref_task list -> task_ids:string list -> unit (** Cancel all current operations. *) diff --git a/ocaml/xenopsd/xc/domain.mli b/ocaml/xenopsd/xc/domain.mli index 598a9efc3d9..c8f83b0994a 100644 --- a/ocaml/xenopsd/xc/domain.mli +++ b/ocaml/xenopsd/xc/domain.mli @@ -146,7 +146,7 @@ val make : -> create_info -> int -> arch_domainconfig - -> [`Vm] Uuidx.t + -> [`VM] Uuidx.t -> string option -> bool (* no_sharept *) -> domid @@ -294,7 +294,7 @@ val soft_reset : val vcpu_affinity_get : xc:Xenctrl.handle -> domid -> int -> bool array (** Get Cpu affinity of some vcpus of a domain *) -val get_uuid : xc:Xenctrl.handle -> Xenctrl.domid -> [`Vm] Uuidx.t +val get_uuid : xc:Xenctrl.handle -> Xenctrl.domid -> [`VM] Uuidx.t (** Get the uuid from a specific domain *) val set_memory_dynamic_range : From 0ec94ffbb824b23162e1881f047e9a429610f5df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 27 Sep 2024 16:30:05 +0100 Subject: [PATCH 052/141] CP-49135: split secret and non-secret Refs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split Uuidx.all into without_secret and secret parts. This ensures that all sessions are created only with Uuidx.make_uuid_rnd which is guaranteed to use a CSPRNG. Currently Uuidx.make uses the same generator, but that will change in a future commit. 
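For illustration only (this snippet is not part of the patch), call sites now pick a constructor according to whether the identifier is an authentication token:

```
let vm_uuid : [`VM] Uuidx.t = Uuidx.make ()

(* Secrets must come from the CSPRNG-backed constructor: *)
let session_uuid : [`session] Uuidx.t = Uuidx.make_uuid_urnd ()

(* let bad : [`session] Uuidx.t = Uuidx.make ()   <- rejected by the type checker *)
```
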
Introduce Ref.make_secret, Ref.of_secret_string, and change the other function's signatures so that none of them return a secret (except Ref.null). It is important that uuids/refs that are used as a secret/auth token are generated in a cryptographically safe way, if a user can guess them then they can impersonate an authenticated user. Ref.of_secret_string bypasses these checks, but it is only used in a limited set of places, where we receive session ids from an external source (HTTP requests, or CLI parameters), or test code. No functional change. Signed-off-by: Edwin Török --- ocaml/idl/ocaml_backend/gen_db_actions.ml | 14 ++++++--- ocaml/libs/uuid/uuidx.ml | 9 ++++-- ocaml/libs/uuid/uuidx.mli | 36 +++++++++++++++-------- ocaml/nbd/src/main.ml | 2 +- ocaml/tests/common/test_common.ml | 2 +- ocaml/tests/test_client.ml | 2 +- ocaml/xapi-cli-server/xapi_cli.ml | 3 +- ocaml/xapi-client/event_helper.ml | 4 ++- ocaml/xapi-guard/test/xapi_guard_test.ml | 2 +- ocaml/xapi-types/ref.ml | 14 +++++++++ ocaml/xapi-types/ref.mli | 20 ++++++++++--- ocaml/xapi/helpers.ml | 2 +- ocaml/xapi/import.ml | 2 +- ocaml/xapi/xapi_http.ml | 9 ++++-- ocaml/xapi/xapi_local_session.ml | 2 +- ocaml/xapi/xapi_session.ml | 6 ++-- ocaml/xapi/xapi_vm_migrate.ml | 2 +- 17 files changed, 93 insertions(+), 38 deletions(-) diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index 44542173fe9..e0cc5cc8454 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -134,8 +134,12 @@ let string_to_dm tys : O.Module.t = | DT.Map (key, value) -> let kf = OU.alias_of_ty key and vf = OU.alias_of_ty value in "fun m -> map " ^ kf ^ " " ^ vf ^ " m" - | DT.Ref _ -> - "fun x -> (Ref.of_string x : " ^ OU.ocaml_of_ty ty ^ ")" + | DT.Ref t -> + "fun x -> (Ref.of_" + ^ (if t = "session" then "secret_" else "") + ^ "string x : " + ^ OU.ocaml_of_ty ty + ^ ")" | DT.Set ty -> "fun s -> set " ^ OU.alias_of_ty ty ^ " s" | DT.String -> @@ -360,7 +364,8 @@ let db_action api : O.Module.t = expr ; Printf.sprintf "List.map (fun (ref,(__regular_fields,__set_refs)) -> \ - Ref.of_string ref, %s __regular_fields __set_refs) records" + Ref.of_%sstring ref, %s __regular_fields __set_refs) records" + (if obj.DT.name = "session" then "secret_" else "") conversion_fn ] ) @@ -374,9 +379,10 @@ let db_action api : O.Module.t = obj.DT.name ; Printf.sprintf "(fun ~__context ~self -> (fun () -> API.rpc_of_%s_t \ - (%s.get_record ~__context ~self:(Ref.of_string self))))" + (%s.get_record ~__context ~self:(Ref.of_%sstring self))))" (OU.ocaml_of_record_name obj.DT.name) (OU.ocaml_of_obj_name obj.DT.name) + (if obj.DT.name = "session" then "secret_" else "") ] () in diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index d229ad25909..39c74bd4c8f 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -12,7 +12,7 @@ * GNU Lesser General Public License for more details. 
*) -type all = +type without_secret = [ `auth | `blob | `Bond @@ -57,7 +57,6 @@ type all = | `role | `SDN_controller | `secret - | `session | `SM | `SR | `sr_stat @@ -85,6 +84,12 @@ type all = | `VTPM | `VUSB ] +type secret = [`session] + +type not_secret = [without_secret | `session of [`use_make_uuid_rnd_instead]] + +type all = [without_secret | secret] + type 'a t = Uuidm.t constraint 'a = [< all] let null = Uuidm.nil diff --git a/ocaml/libs/uuid/uuidx.mli b/ocaml/libs/uuid/uuidx.mli index d5658430622..31f67a4abbf 100644 --- a/ocaml/libs/uuid/uuidx.mli +++ b/ocaml/libs/uuid/uuidx.mli @@ -22,8 +22,8 @@ Also, cookies aren't UUIDs and should be put somewhere else. *) -(** all object classes supported by XAPI *) -type all = +(** regular UUIDs *) +type without_secret = [ `auth | `blob | `Bond @@ -68,7 +68,6 @@ type all = | `role | `SDN_controller | `secret - | `session | `SM | `SR | `sr_stat @@ -96,22 +95,35 @@ type all = | `VTPM | `VUSB ] +(** ensures that attempting to unify the type with `session yields + an error message about a type conflict, + and also avoids accidentally getting session added to the above + {!type:without_secret} type. + *) +type not_secret = [without_secret | `session of [`use_make_uuid_rnd_instead]] + +(** session UUIDs and Refs are secret: they are effectively authentication tokens *) +type secret = [`session] + +(** all object classes supported by XAPI *) +type all = [without_secret | secret] + (** A 128-bit UUID to identify an object of class 'a. For example the UUID of a host has the type ([\[`host\] Uuidx.t]). The type parameter is one of {!type:all} *) type 'a t = Uuidm.t constraint 'a = [< all] -val null : 'a t +val null : [< not_secret] t (** A null UUID, as if such a thing actually existed. It turns out to be useful though. *) -val make : unit -> 'a t +val make : unit -> [< not_secret] t (** Create a fresh UUID *) -val make_uuid_urnd : unit -> 'a t +val make_uuid_urnd : unit -> [< secret] t -val pp : Format.formatter -> 'a t -> unit +val pp : Format.formatter -> [< not_secret] t -> unit val equal : 'a t -> 'a t -> bool @@ -123,7 +135,7 @@ val of_string : string -> 'a t option val to_string : 'a t -> string (** Marshal a UUID to a string. *) -val uuid_of_string : string -> 'a t option +val uuid_of_string : string -> [< not_secret] t option [@@deprecated "Use of_string"] (** Deprecated alias for {! Uuidx.of_string} *) @@ -131,13 +143,13 @@ val string_of_uuid : 'a t -> string [@@deprecated "Use to_string"] (** Deprecated alias for {! Uuidx.to_string} *) -val of_int_array : int array -> 'a t option +val of_int_array : int array -> [< not_secret] t option (** Convert an array to a UUID. *) val to_int_array : 'a t -> int array (** Convert a UUID to an array. *) -val uuid_of_int_array : int array -> 'a t option +val uuid_of_int_array : int array -> [< not_secret] t option [@@deprecated "Use Uuidx.of_int_array"] (** Deprecated alias for {! Uuidx.of_int_array} *) @@ -145,7 +157,7 @@ val int_array_of_uuid : 'a t -> int array [@@deprecated "Use Uuidx.to_int_array"] (** Deprecated alias for {! Uuidx.to_int_array} *) -val of_bytes : string -> 'a t option +val of_bytes : string -> [< not_secret] t option val to_bytes : 'a t -> string @@ -163,5 +175,5 @@ module Hash : sig namespace UUID e93e0639-2bdb-4a59-8b46-352b3f408c19. 
*) (* UUID Version 5 derived from argument string and namespace UUID *) - val string : string -> 'a t + val string : string -> [< not_secret] t end diff --git a/ocaml/nbd/src/main.ml b/ocaml/nbd/src/main.ml index 25919464839..5b5be77f03a 100644 --- a/ocaml/nbd/src/main.ml +++ b/ocaml/nbd/src/main.ml @@ -50,7 +50,7 @@ let handle_connection fd tls_role = ( match Uri.get_query_param uri "session_id" with | Some session_str -> (* Validate the session *) - let session_id = API.Ref.of_string session_str in + let session_id = API.Ref.of_secret_string session_str in Xen_api.Session.get_uuid ~rpc ~session_id ~self:session_id >>= fun _ -> Lwt.return session_id | None -> diff --git a/ocaml/tests/common/test_common.ml b/ocaml/tests/common/test_common.ml index c327914b0f9..7908eb4e3ff 100644 --- a/ocaml/tests/common/test_common.ml +++ b/ocaml/tests/common/test_common.ml @@ -516,7 +516,7 @@ let make_pool_update ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) Xapi_pool_update.create_update_record ~__context ~update:ref ~update_info ~vdi ; ref -let make_session ~__context ?(ref = Ref.make ()) ?(uuid = make_uuid ()) +let make_session ~__context ?(ref = Ref.make_secret ()) ?(uuid = make_uuid ()) ?(this_host = Ref.null) ?(this_user = Ref.null) ?(last_active = API.Date.epoch) ?(pool = false) ?(other_config = []) ?(is_local_superuser = false) ?(subject = Ref.null) diff --git a/ocaml/tests/test_client.ml b/ocaml/tests/test_client.ml index 1c3137721b8..55096a5c48a 100644 --- a/ocaml/tests/test_client.ml +++ b/ocaml/tests/test_client.ml @@ -12,7 +12,7 @@ let make_client_params ~__context = let req = Xmlrpc_client.xmlrpc ~version:"1.1" "/" in let rpc = Api_server.Server.dispatch_call req Unix.stdout in let session_id = - let session_id = Ref.make () in + let session_id = Ref.make_secret () in let now = Xapi_stdext_date.Date.now () in let (_ : _ API.Ref.t) = Test_common.make_session ~__context ~ref:session_id diff --git a/ocaml/xapi-cli-server/xapi_cli.ml b/ocaml/xapi-cli-server/xapi_cli.ml index 89a9a0177b4..59c033efb74 100644 --- a/ocaml/xapi-cli-server/xapi_cli.ml +++ b/ocaml/xapi-cli-server/xapi_cli.ml @@ -295,7 +295,8 @@ let parse_session_and_args str = try let line = List.hd args in if Astring.String.is_prefix ~affix:"session_id=" line then - ( Some (Ref.of_string (String.sub line 11 (String.length line - 11))) + ( Some + (Ref.of_secret_string (String.sub line 11 (String.length line - 11))) , List.tl args ) else diff --git a/ocaml/xapi-client/event_helper.ml b/ocaml/xapi-client/event_helper.ml index 453d2cc2c8d..3ec6e7f9236 100644 --- a/ocaml/xapi-client/event_helper.ml +++ b/ocaml/xapi-client/event_helper.ml @@ -50,7 +50,9 @@ let record_of_event ev = match ev.Event_types.ty with | "session" -> Session - (Ref.of_string ev.Event_types.reference, maybe API.session_t_of_rpc rpc) + ( Ref.of_secret_string ev.Event_types.reference + , maybe API.session_t_of_rpc rpc + ) | "task" -> Task (Ref.of_string ev.Event_types.reference, maybe API.task_t_of_rpc rpc) | "event" -> diff --git a/ocaml/xapi-guard/test/xapi_guard_test.ml b/ocaml/xapi-guard/test/xapi_guard_test.ml index b9e6fea2c9b..5486f6b61d2 100644 --- a/ocaml/xapi-guard/test/xapi_guard_test.ml +++ b/ocaml/xapi-guard/test/xapi_guard_test.ml @@ -6,7 +6,7 @@ open Xen_api_client_lwt.Xen_api_lwt_unix module D = Debug.Make (struct let name = "xapi-guard-test" end) -let expected_session_id : [`session] Ref.t = Ref.make () +let expected_session_id : [`session] Ref.t = Ref.make_secret () let vm : [`VM] Ref.t = Ref.make () diff --git a/ocaml/xapi-types/ref.ml 
b/ocaml/xapi-types/ref.ml index 4b1be2c27d5..c3ce6da534f 100644 --- a/ocaml/xapi-types/ref.ml +++ b/ocaml/xapi-types/ref.ml @@ -12,6 +12,14 @@ * GNU Lesser General Public License for more details. *) +type without_secret = Uuidx.without_secret + +type not_secret = + [ without_secret + | `session of [`use_make_secret_or_ref_of_secret_string_instead] ] + +type secret = Uuidx.secret + type all = Uuidx.all type 'a t = @@ -40,6 +48,10 @@ let make () = let uuid = Uuidx.(to_string (make ())) in Real uuid +let make_secret () = + let uuid = Uuidx.(to_string (make_uuid_urnd ())) in + Real uuid + let null = Null (* a dummy reference is a reference of an object which is not in database *) @@ -105,6 +117,8 @@ let of_string x = else Other x +let of_secret_string = of_string + let to_option = function Null -> None | ref -> Some ref let name_of_dummy = function diff --git a/ocaml/xapi-types/ref.mli b/ocaml/xapi-types/ref.mli index 6e27078518f..d8e8b0c0a57 100644 --- a/ocaml/xapi-types/ref.mli +++ b/ocaml/xapi-types/ref.mli @@ -12,6 +12,14 @@ * GNU Lesser General Public License for more details. *) +type without_secret = Uuidx.without_secret + +type secret = Uuidx.secret + +type not_secret = + [ without_secret + | `session of [`use_make_secret_or_ref_of_secret_string_instead] ] + type all = Uuidx.all type 'a t constraint 'a = [< all] @@ -22,9 +30,11 @@ val t_of_rpc : (Rpc.t -> 'a) -> Rpc.t -> 'a t val ref_prefix : string -val make : unit -> 'a t +val make : unit -> [< not_secret] t -val null : 'a t +val make_secret : unit -> [< secret] t + +val null : _ t val compare : 'a t -> 'a t -> int (** [compare a b] returns [0] if [a] and [b] are equal, a negative integer if @@ -38,9 +48,11 @@ val to_option : 'a t -> 'a t option val short_string_of : 'a t -> string -val of_string : string -> 'a t +val of_string : string -> [< not_secret] t + +val of_secret_string : string -> [< secret] t -val make_dummy : string -> 'a t +val make_dummy : string -> [< not_secret] t val is_real : 'a t -> bool diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml index 0a32a8af1d3..8373d85e839 100644 --- a/ocaml/xapi/helpers.ml +++ b/ocaml/xapi/helpers.ml @@ -547,7 +547,7 @@ let call_api_functions ~__context f = Context.with_tracing ~__context __FUNCTION__ @@ fun __context -> match Context.get_test_rpc __context with | Some rpc -> - f rpc (Ref.of_string "fake_session") + f rpc (Ref.of_secret_string "fake_session") | None -> call_api_functions_internal ~__context f diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index 7e1a1cb8f12..a1aaa306f53 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -2515,7 +2515,7 @@ let handler (req : Request.t) s _ = if List.mem_assoc "session_id" all then let external_session_id = List.assoc "session_id" all in Xapi_session.consider_touching_session rpc - (Ref.of_string external_session_id) + (Ref.of_secret_string external_session_id) else fun () -> () in diff --git a/ocaml/xapi/xapi_http.ml b/ocaml/xapi/xapi_http.ml index 694520a5609..65de926376c 100644 --- a/ocaml/xapi/xapi_http.ml +++ b/ocaml/xapi/xapi_http.ml @@ -65,8 +65,11 @@ let ref_param_of_req (req : Http.Request.t) param_name = let _session_id = "session_id" +let session_ref_param_of_req (req : Http.Request.t) = + lookup_param_of_req req _session_id |> Option.map Ref.of_secret_string + let get_session_id (req : Request.t) = - ref_param_of_req req _session_id |> Option.value ~default:Ref.null + session_ref_param_of_req req |> Option.value ~default:Ref.null let append_to_master_audit_log __context action line = (* http 
actions are not automatically written to the master's audit log *) @@ -138,7 +141,7 @@ let assert_credentials_ok realm ?(http_action = realm) ?(fn = Rbac.nofn) (* Connections from unix-domain socket implies you're root on the box, ergo everything is OK *) else match - ( ref_param_of_req req _session_id + ( session_ref_param_of_req req , Helpers.secret_string_of_request req , req.Http.Request.auth ) @@ -203,7 +206,7 @@ let with_context ?(dummy = false) label (req : Request.t) (s : Unix.file_descr) ) else match - ( ref_param_of_req req _session_id + ( session_ref_param_of_req req , Helpers.secret_string_of_request req , req.Http.Request.auth ) diff --git a/ocaml/xapi/xapi_local_session.ml b/ocaml/xapi/xapi_local_session.ml index 7a5cf5f5070..2985ca3d9a4 100644 --- a/ocaml/xapi/xapi_local_session.ml +++ b/ocaml/xapi/xapi_local_session.ml @@ -26,7 +26,7 @@ let get_all ~__context = with_lock m (fun () -> Hashtbl.fold (fun k _ acc -> k :: acc) table []) let create ~__context ~pool = - let r = Ref.make () in + let r = Ref.make_secret () in let session = {r; pool; last_active= Xapi_stdext_date.Date.now ()} in with_lock m (fun () -> Hashtbl.replace table r session) ; r diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 4def022bfcc..7e77def1f43 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -615,8 +615,8 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool ~rbac_permissions ~db_ref ~client_certificate = Context.with_tracing ~originator ~__context __FUNCTION__ @@ fun __context -> let create_session () = - let session_id = Ref.make () in - let uuid = Uuidx.to_string (Uuidx.make ()) in + let session_id = Ref.make_secret () in + let uuid = Uuidx.to_string (Uuidx.make_uuid_urnd ()) in let user = Ref.null in (* always return a null reference to the deprecated user object *) let parent = try Context.get_session_id __context with _ -> Ref.null in @@ -645,7 +645,7 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool Ref.string_of session_id in let session_id = - Ref.of_string + Ref.of_secret_string ( match db_ref with | Some db_ref -> Xapi_database.Db_backend.create_registered_session create_session diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index 677da6fe8f1..1f4994fee6c 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -103,7 +103,7 @@ let remote_of_dest ~__context dest = in let master_url = List.assoc _master dest |> maybe_set_https in let xenops_url = List.assoc _xenops dest |> maybe_set_https in - let session_id = Ref.of_string (List.assoc _session_id dest) in + let session_id = Ref.of_secret_string (List.assoc _session_id dest) in let remote_ip = get_ip_from_url xenops_url in let remote_master_ip = get_ip_from_url master_url in let dest_host_string = List.assoc _host dest in From 6635a00d68e5097e6eda8f44d78e0c7cd0589aec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 27 Sep 2024 17:13:55 +0100 Subject: [PATCH 053/141] CP-49136: Introduce PRNG for generating non-secret UUIDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not yet enabled by default, behind xapi.conf feature flag. Will be enabled by default after more testing/auditing of the code. Pool secret and Session are the only secret uuids/refs, for everything else avoid the overhead of reading from /dev/urandom every time, and use an internal PRNG. 
Both the opaqueref and UUID of a Session is a secret, because you can retrieve one if you can guess the other, so both must be created using CSPRNG. This is about 15x faster than using /dev/urandom: ``` ╭───────────────────────────────────────┬───────────────────────────┬───────────────────────────┬───────────────────────────╮ │name │ major-allocated │ minor-allocated │ monotonic-clock │ ├───────────────────────────────────────┼───────────────────────────┼───────────────────────────┼───────────────────────────┤ │ uuidx creation/Uuidx.make │ 0.0001 mjw/run│ 8.0001 mnw/run│ 88.1485 ns/run│ │ uuidx creation/Uuidx.make_uuid_urnd │ 0.0000 mjw/run│ 10.0014 mnw/run│ 1369.9626 ns/run│ ╰───────────────────────────────────────┴───────────────────────────┴───────────────────────────┴───────────────────────────╯ ``` Uses the type system to ensure that we cannot create Refs or Uuids of type 'session' with the wrong function. Example type error from a testcase that has ben fixed: ``` File "ocaml/tests/test_client.ml", line 18, characters 47-57: 18 | Test_common.make_session ~__context ~ref:session_id ^^^^^^^^^^ Error: This expression has type [< Uuidx.not_session ] Ref.t but an expression was expected of type [ `session ] Ref.t Type [< Uuidx.not_session ] = [< `session of [ `use_make_uuid_urnd ] ] is not compatible with type [ `session ] Types for tag `session are incompatible ``` Signed-off-by: Edwin Török --- ocaml/libs/uuid/dune | 1 + ocaml/libs/uuid/uuidx.ml | 16 ++++++++++++++-- ocaml/libs/uuid/uuidx.mli | 14 +++++++++++++- ocaml/tests/bench/bench_uuid.ml | 2 ++ ocaml/xapi-types/ref.mli | 4 ++-- ocaml/xapi/helpers.ml | 4 +++- ocaml/xapi/xapi_globs.ml | 6 ++++++ 7 files changed, 41 insertions(+), 6 deletions(-) diff --git a/ocaml/libs/uuid/dune b/ocaml/libs/uuid/dune index 5f7c5c25b95..81c7edec804 100644 --- a/ocaml/libs/uuid/dune +++ b/ocaml/libs/uuid/dune @@ -4,6 +4,7 @@ (modules uuidx) (libraries unix (re_export uuidm) + threads.posix ) (wrapped false) ) diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index 39c74bd4c8f..98eefe1ab73 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -131,8 +131,20 @@ let read_bytes dev n = let make_uuid_urnd () = of_bytes (read_bytes dev_urandom_fd 16) |> Option.get -(* Use the CSPRNG-backed urandom *) -let make = make_uuid_urnd +(** Use non-CSPRNG by default, for CSPRNG see {!val:make_uuid_urnd} *) +let make_uuid_fast = + let uuid_state = Random.State.make_self_init () in + (* On OCaml 5 we could use Random.State.split instead, + and on OCaml 4 the mutex may not be strictly needed + *) + let m = Mutex.create () in + let finally () = Mutex.unlock m in + let gen = Uuidm.v4_gen uuid_state in + fun () -> Mutex.lock m ; Fun.protect ~finally gen + +let make_default = ref make_uuid_urnd + +let make () = !make_default () type cookie = string diff --git a/ocaml/libs/uuid/uuidx.mli b/ocaml/libs/uuid/uuidx.mli index 31f67a4abbf..ebc9f2e1611 100644 --- a/ocaml/libs/uuid/uuidx.mli +++ b/ocaml/libs/uuid/uuidx.mli @@ -122,6 +122,13 @@ val make : unit -> [< not_secret] t (** Create a fresh UUID *) val make_uuid_urnd : unit -> [< secret] t +(** [make_uuid_urnd ()] generate a UUID using a CSPRNG. + Currently this reads from /dev/urandom directly. *) + +val make_uuid_fast : unit -> [< not_secret] t +(** [make_uuid_fast ()] generate a UUID using a PRNG. + Don't use this to generate secrets, see {!val:make_uuid_urnd} for that instead. 
+ *) val pp : Format.formatter -> [< not_secret] t -> unit @@ -129,7 +136,7 @@ val equal : 'a t -> 'a t -> bool val is_uuid : string -> bool -val of_string : string -> 'a t option +val of_string : string -> [< not_secret] t option (** Create a UUID from a string. *) val to_string : 'a t -> string @@ -177,3 +184,8 @@ module Hash : sig (* UUID Version 5 derived from argument string and namespace UUID *) val string : string -> [< not_secret] t end + +(**/**) + +(* just for feature flag, to be removed *) +val make_default : (unit -> [< not_secret] t) ref diff --git a/ocaml/tests/bench/bench_uuid.ml b/ocaml/tests/bench/bench_uuid.ml index f13118e48db..a04ff192d76 100644 --- a/ocaml/tests/bench/bench_uuid.ml +++ b/ocaml/tests/bench/bench_uuid.ml @@ -1,5 +1,7 @@ open Bechamel +let () = Uuidx.make_default := Uuidx.make_uuid_fast + let benchmarks = Test.make_grouped ~name:"uuidx creation" [ diff --git a/ocaml/xapi-types/ref.mli b/ocaml/xapi-types/ref.mli index d8e8b0c0a57..2e201b6b3d6 100644 --- a/ocaml/xapi-types/ref.mli +++ b/ocaml/xapi-types/ref.mli @@ -46,7 +46,7 @@ val to_option : 'a t -> 'a t option (** [to_option ref] returns [None] when [ref] is [Ref.Null] or [Some ref] otherwise *) -val short_string_of : 'a t -> string +val short_string_of : [< not_secret] t -> string val of_string : string -> [< not_secret] t @@ -60,6 +60,6 @@ val is_dummy : 'a t -> bool val name_of_dummy : 'a t -> string -val really_pretty_and_small : 'a t -> string +val really_pretty_and_small : [< not_secret] t -> string val pp : Format.formatter -> 'a t -> unit diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml index 8373d85e839..30965068f3f 100644 --- a/ocaml/xapi/helpers.ml +++ b/ocaml/xapi/helpers.ml @@ -1955,7 +1955,9 @@ end = struct (* by default we generate the pool secret using /dev/urandom, but if a script to generate the pool secret exists, use that instead *) let make_urandom () = - Stdlib.List.init 3 (fun _ -> Uuidx.(make_uuid_urnd () |> to_string)) + Stdlib.List.init 3 (fun _ -> + Uuidx.((make_uuid_urnd () : [`session] t) |> to_string) + ) |> String.concat "/" in let make_script () = diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index d23d7ec4ce6..3000669bd6f 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1612,6 +1612,12 @@ let other_options = , (fun () -> string_of_bool !disable_webserver) , "Disable the host webserver" ) + ; ( "use-prng-uuid-gen" + (* eventually this'll be the default, except for Sessions *) + , Arg.Unit (fun () -> Uuidx.make_default := Uuidx.make_uuid_fast) + , (fun () -> !Uuidx.make_default = Uuidx.make_uuid_fast |> string_of_bool) + , "Use PRNG based UUID generator instead of CSPRNG" + ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. From e65598009c2ade68e60559f81f63a7a894cdadef Mon Sep 17 00:00:00 2001 From: Benjamin Reis Date: Mon, 30 Sep 2024 13:42:21 +0200 Subject: [PATCH 054/141] Bring up IPv6 only virtual PIF as well on startup Like for IPv4 bring up IPv6 configured virtual PIF when starting up an host. 
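The changed clause can be read as follows (an illustrative sketch, not the literal Db_filter expression used in the patch; the surrounding conditions on physical and bond PIFs are unchanged): a PIF is now only excluded from start-of-day plugging when both of its address families are unconfigured.

```
(* Sketch of the changed clause only, with hypothetical labelled arguments. *)
let excluded_at_start_of_day ~ipv4_mode ~ipv6_mode =
  (* before this patch the test was just: ipv4_mode = `None *)
  ipv4_mode = `None && ipv6_mode = `None
```
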
Signed-off-by: Benjamin Reis --- ocaml/xapi/xapi_pif.ml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index 3df1d692b39..a2383ed9d9b 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -1109,7 +1109,12 @@ let calculate_pifs_required_at_start_of_day ~__context = ( Not (Eq (Field "bond_master_of", Literal "()")) , Eq (Field "physical", Literal "true") ) - , Not (Eq (Field "ip_configuration_mode", Literal "None")) + , Not + (And + ( Eq (Field "ip_configuration_mode", Literal "None") + , Eq (Field "ipv6_configuration_mode", Literal "None") + ) + ) ) ) ) From af68185ba81b9817741992410b48f9e28e118e06 Mon Sep 17 00:00:00 2001 From: Steven Woods Date: Wed, 7 Aug 2024 13:33:48 +0100 Subject: [PATCH 055/141] CP-48676: Reuse pool sessions on slave logins. Prevent this reusable pool session from being destroyed so that it remains valid. This feature can be toggled with the flag reuse-pool-sessions. Signed-off-by: Steven Woods --- ocaml/xapi/xapi_globs.ml | 7 ++++ ocaml/xapi/xapi_session.ml | 77 +++++++++++++++++++++++++++++++------- 2 files changed, 71 insertions(+), 13 deletions(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 3000669bd6f..3fbe0d36b4f 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1053,6 +1053,8 @@ let pool_recommendations_dir = ref "/etc/xapi.pool-recommendations.d" let disable_webserver = ref false +let reuse_pool_sessions = ref true + let test_open = ref 0 let xapi_globs_spec = @@ -1618,6 +1620,11 @@ let other_options = , (fun () -> !Uuidx.make_default = Uuidx.make_uuid_fast |> string_of_bool) , "Use PRNG based UUID generator instead of CSPRNG" ) + ; ( "reuse-pool-sessions" + , Arg.Set reuse_pool_sessions + , (fun () -> string_of_bool !reuse_pool_sessions) + , "Enable the reuse of pool sessions" + ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 7e77def1f43..abced81ca42 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -398,19 +398,29 @@ let is_subject_suspended ~__context ~cache subject_identifier = debug "Subject identifier %s is suspended" subject_identifier ; (is_suspended, subject_name) +let reusable_pool_session = ref Ref.null + +let reusable_pool_session_lock = Mutex.create () + let destroy_db_session ~__context ~self = Context.with_tracing ~__context __FUNCTION__ @@ fun __context -> - Xapi_event.on_session_deleted self ; - (* unregister from the event system *) - (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) - (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) - (* logged in. 
Instead, we print a non-invertible hash as the tracking id for the session id *) - (* see also task creation in context.ml *) - (* CP-982: create tracking id in log files to link username to actions *) - info "Session.destroy %s" (trackid self) ; - Rbac_audit.session_destroy ~__context ~session_id:self ; - (try Db.Session.destroy ~__context ~self with _ -> ()) ; - Rbac.destroy_session_permissions_tbl ~session_id:self + with_lock reusable_pool_session_lock (fun () -> + if self <> !reusable_pool_session then ( + Xapi_event.on_session_deleted self ; + (* unregister from the event system *) + (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) + (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) + (* logged in. Instead, we print a non-invertible hash as the tracking id for the session id *) + (* see also task creation in context.ml *) + (* CP-982: create tracking id in log files to link username to actions *) + info "Session.destroy %s" (trackid self) ; + Rbac_audit.session_destroy ~__context ~session_id:self ; + (try Db.Session.destroy ~__context ~self with _ -> ()) ; + Rbac.destroy_session_permissions_tbl ~session_id:self + ) else + info "Skipping Session.destroy for reusable pool session %s" + (trackid self) + ) (* CP-703: ensure that activate sessions are invalidated in a bounded time *) (* in response to external authentication/directory services updates, such as *) @@ -610,8 +620,8 @@ let revalidate_all_sessions ~__context = debug "Unexpected exception while revalidating external sessions: %s" (ExnHelper.string_of_exn e) -let login_no_password_common ~__context ~uname ~originator ~host ~pool - ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name +let login_no_password_common_create_session ~__context ~uname ~originator ~host + ~pool ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name ~rbac_permissions ~db_ref ~client_certificate = Context.with_tracing ~originator ~__context __FUNCTION__ @@ fun __context -> let create_session () = @@ -661,6 +671,47 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool ignore (Client.Pool.get_all ~rpc ~session_id) ; session_id +let login_no_password_common ~__context ~uname ~originator ~host ~pool + ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name + ~rbac_permissions ~db_ref ~client_certificate = + Context.with_tracing ~originator ~__context __FUNCTION__ @@ fun __context -> + let is_valid_session session_id = + try + (* Call an API function to check the session is still valid *) + let rpc = Helpers.make_rpc ~__context in + ignore (Client.Pool.get_all ~rpc ~session_id) ; + true + with Api_errors.Server_error (err, _) -> + info "Invalid session: %s" err ; + false + in + let create_session () = + let new_session_id = + login_no_password_common_create_session ~__context ~uname ~originator + ~host ~pool ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name + ~rbac_permissions ~db_ref ~client_certificate + in + new_session_id + in + if + (originator, pool, is_local_superuser, uname) + = (xapi_internal_originator, true, true, None) + && !Xapi_globs.reuse_pool_sessions + then + with_lock reusable_pool_session_lock (fun () -> + if + !reusable_pool_session <> Ref.null + && is_valid_session !reusable_pool_session + then + !reusable_pool_session + else + let new_session_id = create_session () in + reusable_pool_session := new_session_id ; + new_session_id + ) + else + create_session () + (* XXX: only 
used internally by the code which grants the guest access to the API. Needs to be protected by a proper access control system *) let login_no_password ~__context ~uname ~host ~pool ~is_local_superuser ~subject From c27b1d45b9a209ae922250a54b2a0a076af7a531 Mon Sep 17 00:00:00 2001 From: Steven Woods Date: Mon, 23 Sep 2024 11:30:08 +0100 Subject: [PATCH 056/141] CP-48676: Don't check resuable pool session validity by default Add a new flag validate-reusable-pool-session to xapi globs which skips the reusable pool session validity check if it is false. This saves time as we are no longer calling pool.get_all for each session. Signed-off-by: Steven Woods --- ocaml/xapi/xapi_globs.ml | 7 +++++++ ocaml/xapi/xapi_session.ml | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 3fbe0d36b4f..1b0d7c9bdd5 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1055,6 +1055,8 @@ let disable_webserver = ref false let reuse_pool_sessions = ref true +let validate_reusable_pool_session = ref false + let test_open = ref 0 let xapi_globs_spec = @@ -1625,6 +1627,11 @@ let other_options = , (fun () -> string_of_bool !reuse_pool_sessions) , "Enable the reuse of pool sessions" ) + ; ( "validate-reusable-pool-session" + , Arg.Set validate_reusable_pool_session + , (fun () -> string_of_bool !validate_reusable_pool_session) + , "Enable the reuse of pool sessions" + ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index abced81ca42..bd981cb3692 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -701,7 +701,9 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool with_lock reusable_pool_session_lock (fun () -> if !reusable_pool_session <> Ref.null - && is_valid_session !reusable_pool_session + && ((not !Xapi_globs.validate_reusable_pool_session) + || is_valid_session !reusable_pool_session + ) then !reusable_pool_session else From 8a81275710e0bf681aadb609c51ff3f0781e3a5b Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 30 Sep 2024 13:32:48 +0100 Subject: [PATCH 057/141] CP-28369: Remove Unixext.daemonize Last usage of `Unixext.daemonize` was in `cdrommon`, drop it and move the daemon to be fully handled by systemd (instead of type=forking). Signed-off-by: Andrii Sultanov --- ocaml/cdrommon/cdrommon.ml | 1 - .../lib/xapi-stdext-unix/unixext.ml | 21 ------------------- .../lib/xapi-stdext-unix/unixext.mli | 4 +--- quality-gate.sh | 2 +- scripts/cdrommon@.service | 2 -- 5 files changed, 2 insertions(+), 28 deletions(-) diff --git a/ocaml/cdrommon/cdrommon.ml b/ocaml/cdrommon/cdrommon.ml index 1a897d6f9ea..7311b4604c7 100644 --- a/ocaml/cdrommon/cdrommon.ml +++ b/ocaml/cdrommon/cdrommon.ml @@ -63,6 +63,5 @@ let () = Printf.eprintf "usage: %s \n" Sys.argv.(0) ; exit 1 ) ; - Xapi_stdext_unix.Unixext.daemonize () ; (* check every 2 seconds *) check 2 Sys.argv.(1) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.ml index 8afed357e6c..c63a61ff783 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.ml @@ -94,27 +94,6 @@ let with_file file mode perms f = (fun () -> f fd) (fun () -> Unix.close fd) -(* !! Must call this before spawning any threads !! 
*) - -(** daemonize a process *) -let daemonize () = - match Unix.fork () with - | 0 -> ( - if Unix.setsid () == -1 then - failwith "Unix.setsid failed" ; - match Unix.fork () with - | 0 -> - with_file "/dev/null" [Unix.O_WRONLY] 0 (fun nullfd -> - Unix.close Unix.stdin ; - Unix.dup2 nullfd Unix.stdout ; - Unix.dup2 nullfd Unix.stderr - ) - | _ -> - exit 0 - ) - | _ -> - exit 0 - exception Break let lines_fold f start input = diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.mli b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.mli index 3f726b52fe1..fa8eb331f25 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.mli +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/unixext.mli @@ -30,8 +30,6 @@ val pidfile_write : string -> unit val pidfile_read : string -> int option -val daemonize : unit -> unit - val with_file : string -> Unix.open_flag list @@ -262,7 +260,7 @@ val test_open : int -> unit to [Xapi_stdext_unix.Unixext.select] that use file descriptors, because such calls will then immediately fail. This assumes that [ulimit -n] has been suitably increased in the test environment. - + Can only be called once in a program, and will raise an exception otherwise. The file descriptors will stay open until the program exits. diff --git a/quality-gate.sh b/quality-gate.sh index 47e97fa37e2..e67b2ac4eda 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -40,7 +40,7 @@ mli-files () { } structural-equality () { - N=11 + N=10 EQ=$(git grep -r --count ' == ' -- '**/*.ml' ':!ocaml/sdk-gen/**/*.ml' | cut -d ':' -f 2 | paste -sd+ - | bc) if [ "$EQ" -eq "$N" ]; then echo "OK counted $EQ usages of ' == '" diff --git a/scripts/cdrommon@.service b/scripts/cdrommon@.service index 1839c7ba40a..0792b078a9e 100644 --- a/scripts/cdrommon@.service +++ b/scripts/cdrommon@.service @@ -2,6 +2,4 @@ Description=Monitor CDROM of %I [Service] -Type=forking -GuessMainPID=no ExecStart=/opt/xensource/libexec/cdrommon /dev/xapi/cd/%I From c261733f332bff3435964a555d6398f70eecf204 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 1 Oct 2024 10:12:11 +0100 Subject: [PATCH 058/141] CP-28369: Remove Xcp_service.daemonize-related functions The default for daemonize was false for a long time, and nothing expected the users to actually fork. This just removes what's been unused for a long time. Note: maybe_daemonize would run 'start_fn' even in case of 'daemonize=false'. 
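Put differently (a minimal sketch, not part of the patch): since the daemon flag was effectively always false, wrapping setup code in the removed helper was equivalent to just running it, which is what the callers changed below now do.

```
(* Sketch only: start_fn stands in for a caller's setup code. *)
let start_fn () = print_endline "service starting"

let () =
  (* before: Xcp_service.maybe_daemonize ~start_fn () *)
  start_fn ()
```
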
Signed-off-by: Andrii Sultanov --- ocaml/networkd/bin/networkd.ml | 14 +++---- ocaml/squeezed/src/squeezed.ml | 3 -- ocaml/xapi-idl/lib/xcp_service.ml | 57 ----------------------------- ocaml/xapi-idl/lib/xcp_service.mli | 6 --- ocaml/xapi-storage-script/main.ml | 5 --- ocaml/xapi/xapi_main.ml | 2 - ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml | 1 - ocaml/xenopsd/lib/xenopsd.ml | 1 - ocaml/xenopsd/xc/xenops_xc_main.ml | 8 +--- quality-gate.sh | 2 +- 10 files changed, 7 insertions(+), 92 deletions(-) diff --git a/ocaml/networkd/bin/networkd.ml b/ocaml/networkd/bin/networkd.ml index 74209fd7867..3b3163a8a7a 100644 --- a/ocaml/networkd/bin/networkd.ml +++ b/ocaml/networkd/bin/networkd.ml @@ -224,15 +224,11 @@ let () = ~rpc_fn:(Idl.Exn.server Network_server.S.implementation) () in - Xcp_service.maybe_daemonize - ~start_fn:(fun () -> - Debug.set_facility Syslog.Local5 ; - (* We should make the following configurable *) - Debug.disable "http" ; - handle_shutdown () ; - Debug.with_thread_associated "main" start server - ) - () ; + Debug.set_facility Syslog.Local5 ; + (* We should make the following configurable *) + Debug.disable "http" ; + handle_shutdown () ; + Debug.with_thread_associated "main" start server ; let module Daemon = Xapi_stdext_unix.Unixext.Daemon in if Daemon.systemd_notify Daemon.State.Ready then () diff --git a/ocaml/squeezed/src/squeezed.ml b/ocaml/squeezed/src/squeezed.ml index 35a6039341a..2faf3bcaeba 100644 --- a/ocaml/squeezed/src/squeezed.ml +++ b/ocaml/squeezed/src/squeezed.ml @@ -110,9 +110,6 @@ let _ = ~rpc_fn:(Idl.Exn.server S.implementation) () in - maybe_daemonize () ; - (* NB Initialise the xenstore connection after daemonising, otherwise we lose - our connection *) let _ = Thread.create Memory_server.record_boot_time_host_free_memory () in let rpc_server = Thread.create Xcp_service.serve_forever server in Memory_server.start_balance_thread balance_check_interval ; diff --git a/ocaml/xapi-idl/lib/xcp_service.ml b/ocaml/xapi-idl/lib/xcp_service.ml index d6c3cae14db..645b04d0864 100644 --- a/ocaml/xapi-idl/lib/xcp_service.ml +++ b/ocaml/xapi-idl/lib/xcp_service.ml @@ -31,10 +31,6 @@ let log_destination = ref "syslog:daemon" let log_level = ref Syslog.Debug -let daemon = ref false - -let have_daemonized () = Unix.getppid () = 1 - let common_prefix = "org.xen.xapi." let finally f g = @@ -196,11 +192,6 @@ let common_options = , (fun () -> !log_destination) , "Where to write log messages" ) - ; ( "daemon" - , Arg.Bool (fun x -> daemon := x) - , (fun () -> string_of_bool !daemon) - , "True if we are to daemonise" - ) ; ( "disable-logging-for" , Arg.String (fun x -> @@ -552,8 +543,6 @@ let http_handler call_of_string string_of_response process s = Response.write (fun _t -> ()) response oc ) -let ign_int (t : int) = ignore t - let default_raw_fn rpc_fn s = http_handler Xmlrpc.call_of_string Xmlrpc.string_of_response rpc_fn s @@ -635,52 +624,6 @@ let serve_forever = function let rec forever () = Thread.delay 3600. 
; forever () in forever () -let pidfile_write filename = - let fd = - Unix.openfile filename [Unix.O_WRONLY; Unix.O_CREAT; Unix.O_TRUNC] 0o640 - in - finally - (fun () -> - let pid = Unix.getpid () in - let buf = string_of_int pid ^ "\n" |> Bytes.of_string in - let len = Bytes.length buf in - if Unix.write fd buf 0 len <> len then - failwith "pidfile_write failed" - ) - (fun () -> Unix.close fd) - -(* Cf Stevens et al, Advanced Programming in the UNIX Environment, - Section 13.3 *) -let daemonize ?start_fn () = - if not (have_daemonized ()) then - ign_int (Unix.umask 0) ; - match Unix.fork () with - | 0 -> ( - if Unix.setsid () == -1 then failwith "Unix.setsid failed" ; - Sys.set_signal Sys.sighup Sys.Signal_ignore ; - match Unix.fork () with - | 0 -> - Option.iter (fun fn -> fn ()) start_fn ; - Unix.chdir "/" ; - mkdir_rec (Filename.dirname !pidfile) 0o755 ; - pidfile_write !pidfile ; - let nullfd = Unix.openfile "/dev/null" [Unix.O_RDWR] 0 in - Unix.dup2 nullfd Unix.stdin ; - Unix.dup2 nullfd Unix.stdout ; - Unix.dup2 nullfd Unix.stderr ; - Unix.close nullfd - | _ -> - exit 0 - ) - | _ -> - exit 0 - -let maybe_daemonize ?start_fn () = - if !daemon then - daemonize ?start_fn () - else - Option.iter (fun fn -> fn ()) start_fn - let cli ~name ~doc ~version ~cmdline_gen = let default = Term.(ret (const (fun _ -> `Help (`Pager, None)) $ const ())) in let version = diff --git a/ocaml/xapi-idl/lib/xcp_service.mli b/ocaml/xapi-idl/lib/xcp_service.mli index 2b8ce3d44d9..05196bc03a0 100644 --- a/ocaml/xapi-idl/lib/xcp_service.mli +++ b/ocaml/xapi-idl/lib/xcp_service.mli @@ -54,14 +54,8 @@ val make : val serve_forever : server -> unit -val daemon : bool ref - val loglevel : unit -> Syslog.level -val daemonize : ?start_fn:(unit -> unit) -> unit -> unit - -val maybe_daemonize : ?start_fn:(unit -> unit) -> unit -> unit - val cli : name:string -> doc:string diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index cd6575bc9b3..fb4ac093489 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -2043,11 +2043,6 @@ let _ = in configure2 ~name:"xapi-script-storage" ~version:Xapi_version.version ~doc:description ~resources ~options () ; - if !Xcp_service.daemon then ( - Xcp_service.maybe_daemonize () ; - use_syslog := true ; - info "Daemonisation successful." - ) ; let run () = let ( let* ) = ( >>= ) in let* observer_enabled = observer_is_component_enabled () in diff --git a/ocaml/xapi/xapi_main.ml b/ocaml/xapi/xapi_main.ml index bdc253921a1..0107fe37f6f 100644 --- a/ocaml/xapi/xapi_main.ml +++ b/ocaml/xapi/xapi_main.ml @@ -22,8 +22,6 @@ let _ = Debug.set_facility Syslog.Local5 ; Sys.enable_runtime_warnings true ; init_args () ; - (* need to read args to find out whether to daemonize or not *) - Xcp_service.maybe_daemonize () ; (* Disable logging for the module requested in the config *) List.iter (fun m -> diff --git a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml index dbfbd8cb73b..0a3e0c0aa9d 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml @@ -1098,7 +1098,6 @@ let _ = debug "Reading configuration file .." ; Xcp_service.configure2 ~name:Sys.argv.(0) ~version:Xapi_version.version ~doc ~options () ; - Xcp_service.maybe_daemonize () ; debug "Starting the HTTP server .." ; (* Eventually we should switch over to xcp_service to declare our services, but since it doesn't support HTTP GET and PUT we keep the old code for now. 
diff --git a/ocaml/xenopsd/lib/xenopsd.ml b/ocaml/xenopsd/lib/xenopsd.ml index 2052d367585..8d3c9b75f88 100644 --- a/ocaml/xenopsd/lib/xenopsd.ml +++ b/ocaml/xenopsd/lib/xenopsd.ml @@ -461,7 +461,6 @@ let main backend = (* we need to catch this to make sure at_exit handlers are triggered. In particuar, triggers for the bisect_ppx coverage profiling *) let signal_handler n = debug "caught signal %d" n ; exit 0 in - Xcp_service.maybe_daemonize () ; Sys.set_signal Sys.sigpipe Sys.Signal_ignore ; Sys.set_signal Sys.sigterm (Sys.Signal_handle signal_handler) ; Xenops_utils.set_fs_backend diff --git a/ocaml/xenopsd/xc/xenops_xc_main.ml b/ocaml/xenopsd/xc/xenops_xc_main.ml index b49f8f0f6d3..58a94917a64 100644 --- a/ocaml/xenopsd/xc/xenops_xc_main.ml +++ b/ocaml/xenopsd/xc/xenops_xc_main.ml @@ -37,13 +37,7 @@ let check_domain0_uuid () = ] in let open Ezxenstore_core.Xenstore in - with_xs (fun xs -> List.iter (fun (k, v) -> xs.Xs.write k v) kvs) ; - if !Xcp_service.daemon then - (* before daemonizing we need to forget the xenstore client because the - background thread will be gone after the fork(). - Note that this leaks a thread. - *) - forget_client () + with_xs (fun xs -> List.iter (fun (k, v) -> xs.Xs.write k v) kvs) let make_var_run_xen () = Xapi_stdext_unix.Unixext.mkdir_rec Device_common.var_run_xen_path 0o0755 diff --git a/quality-gate.sh b/quality-gate.sh index e67b2ac4eda..2a459450652 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -40,7 +40,7 @@ mli-files () { } structural-equality () { - N=10 + N=9 EQ=$(git grep -r --count ' == ' -- '**/*.ml' ':!ocaml/sdk-gen/**/*.ml' | cut -d ':' -f 2 | paste -sd+ - | bc) if [ "$EQ" -eq "$N" ]; then echo "OK counted $EQ usages of ' == '" From bc08e863a92944ea1dc2bfebfc6b3b29d4d1825b Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Mon, 30 Sep 2024 14:57:20 +0100 Subject: [PATCH 059/141] Corrected strings. Signed-off-by: Konstantina Chremmou --- ocaml/idl/datamodel.ml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 22b918a8a52..5fb25cd26a0 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -7763,8 +7763,8 @@ module VMPP = struct (Ref _vmpp, "self", "The protection policy") ; ( DateTime , "value" - , "When was the last backup was done. When the timezone is missing, \ - UTC is assumed" + , "The time at which the last backup was done. When the timezone is \ + missing, UTC is assumed" ) ] () @@ -7777,8 +7777,8 @@ module VMPP = struct (Ref _vmpp, "self", "The protection policy") ; ( DateTime , "value" - , "When was the last archive was done. When the timezone is missing, \ - UTC is assumed" + , "The time at which the last archive was created. When the timezone \ + is missing, UTC is assumed" ) ] () @@ -8052,8 +8052,8 @@ module VMSS = struct (Ref _vmss, "self", "The snapshot schedule") ; ( DateTime , "value" - , "When was the schedule was last run. When a timezone is missing, \ - UTC is assumed" + , "The time at which the schedule was last run. When the timezone is \ + missing, UTC is assumed" ) ] () From 0fbd6c700844b21fccf65fbdcbe75ff909e3584c Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 24 Sep 2024 14:05:11 +0100 Subject: [PATCH 060/141] IH-615: rrdd - Factor out Xenctrl functionality into a separate library Several metrics collectors still rely on a similar function in xcp_rrdd, but plugins will use this factored-out version. 
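For reference, a minimal sketch (not part of the patch) of how a plugin could consume the new snapshot function through the interface added below:

```
let print_domains () =
  Xenctrl.with_intf (fun xc ->
      let _timestamp, domains, _paused_uuids = Xenctrl_lib.domain_snapshot xc in
      List.iter
        (fun (_info, uuid, domid) -> Printf.printf "domid %d -> %s\n" domid uuid)
        domains
  )
```
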
Signed-off-by: Andrii Sultanov --- ocaml/xcp-rrdd/lib/plugin/dune | 16 ++++++ ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.ml | 59 +++++++++++++++++++++++ ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.mli | 18 +++++++ 3 files changed, 93 insertions(+) create mode 100644 ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.ml create mode 100644 ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.mli diff --git a/ocaml/xcp-rrdd/lib/plugin/dune b/ocaml/xcp-rrdd/lib/plugin/dune index 12710f3305e..b2370504780 100644 --- a/ocaml/xcp-rrdd/lib/plugin/dune +++ b/ocaml/xcp-rrdd/lib/plugin/dune @@ -22,6 +22,22 @@ ) ) +(library + (name rrdd_plugin_xenctrl) + (public_name rrdd-plugin.xenctrl) + (flags (:standard -bin-annot)) + (wrapped false) + (modules xenctrl_lib) + (libraries + astring + xenctrl + ezxenstore.core + uuid + xapi-log + threads.posix + ) +) + (library (name rrdd_plugin_local) (public_name rrdd-plugin.local) diff --git a/ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.ml b/ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.ml new file mode 100644 index 00000000000..a486567d78c --- /dev/null +++ b/ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.ml @@ -0,0 +1,59 @@ +(* + * Copyright (C) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +module D = Debug.Make (struct let name = "xcp-rrdp-xenctrl-lib" end) + +let uuid_blacklist = ["00000000-0000-0000"; "deadbeef-dead-beef"] + +module IntSet = Set.Make (Int) + +let domain_snapshot xc = + let metadata_of_domain dom = + let ( let* ) = Option.bind in + let* uuid_raw = Uuidx.of_int_array dom.Xenctrl.handle in + let uuid = Uuidx.to_string uuid_raw in + let domid = dom.Xenctrl.domid in + let start = String.sub uuid 0 18 in + (* Actively hide migrating VM uuids, these are temporary and xenops writes + the original and the final uuid to xenstore *) + let uuid_from_key key = + let path = Printf.sprintf "/vm/%s/%s" uuid key in + try Ezxenstore_core.Xenstore.(with_xs (fun xs -> xs.read path)) + with Xs_protocol.Enoent _hint -> + D.info "Couldn't read path %s; falling back to actual uuid" path ; + uuid + in + let stable_uuid = Option.fold ~none:uuid ~some:uuid_from_key in + if List.mem start uuid_blacklist then + None + else + let key = + if Astring.String.is_suffix ~affix:"000000000000" uuid then + Some "origin-uuid" + else if Astring.String.is_suffix ~affix:"000000000001" uuid then + Some "final-uuid" + else + None + in + Some (dom, stable_uuid key, domid) + in + let domains = + Xenctrl.domain_getinfolist xc 0 |> List.filter_map metadata_of_domain + in + let timestamp = Unix.gettimeofday () in + let domain_paused (d, uuid, _) = + if d.Xenctrl.paused then Some uuid else None + in + let paused_uuids = List.filter_map domain_paused domains in + (timestamp, domains, paused_uuids) diff --git a/ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.mli b/ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.mli new file mode 100644 index 00000000000..558158b438c --- /dev/null +++ b/ocaml/xcp-rrdd/lib/plugin/xenctrl_lib.mli @@ -0,0 +1,18 @@ +(* + * Copyright (C) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + 
* it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +(* Provides a list of running, non-migrating, and paused VMs *) +val domain_snapshot : + Xenctrl.handle + -> float * (Xenctrl.domaininfo * string * int) list * string list From 772229489e8593b165371743b6a93400d3beb5ef Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 24 Sep 2024 13:04:14 +0100 Subject: [PATCH 061/141] IH-615: Move netdev_dss into a separate RRDD plugin It still currently reads from a file written to by networkd and deserializes the stats. Signed-off-by: Andrii Sultanov --- Makefile | 4 +- ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml | 132 --------------- ocaml/xcp-rrdd/bin/rrdp-netdev/dune | 22 +++ ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml | 157 ++++++++++++++++++ .../xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.mli | 0 .../bin/rrdp-scripts/sysconfig-rrdd-plugins | 2 +- scripts/xe-toolstack-restart | 1 + 7 files changed, 183 insertions(+), 135 deletions(-) create mode 100644 ocaml/xcp-rrdd/bin/rrdp-netdev/dune create mode 100644 ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml create mode 100644 ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.mli diff --git a/Makefile b/Makefile index 186b6c3e92f..53d01a4b063 100644 --- a/Makefile +++ b/Makefile @@ -109,7 +109,7 @@ quality-gate: install-scripts: $(MAKE) -C scripts install - + install-python3: $(MAKE) -C python3 install @@ -164,7 +164,7 @@ install-dune1: dune install $(DUNE_IU_PACKAGES1) DUNE_IU_PACKAGES2=-j $(JOBS) --destdir=$(DESTDIR) --prefix=$(OPTDIR) --libdir=$(LIBDIR) --mandir=$(MANDIR) --libexecdir=$(OPTDIR)/libexec --datadir=$(DOCDIR) xapi xe - + install-dune2: dune install $(DUNE_IU_PACKAGES2) diff --git a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml index dbfbd8cb73b..69d55a217d3 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml @@ -415,137 +415,6 @@ let dss_hostload xc domains = ) ] -(*****************************************************) -(* network related code *) -(*****************************************************) - -let dss_netdev doms = - let uuid_of_domid domains domid = - let _, uuid, _ = - try List.find (fun (_, _, domid') -> domid = domid') domains - with Not_found -> - failwith - (Printf.sprintf "Failed to find uuid corresponding to domid: %d" domid) - in - uuid - in - let open Network_stats in - let stats = Network_stats.read_stats () in - let dss, sum_rx, sum_tx = - List.fold_left - (fun (dss, sum_rx, sum_tx) (dev, stat) -> - if not Astring.String.(is_prefix ~affix:"vif" dev) then - let pif_name = "pif_" ^ dev in - ( ( Rrd.Host - , Ds.ds_make ~name:(pif_name ^ "_rx") - ~description: - ("Bytes per second received on physical interface " ^ dev) - ~units:"B/s" ~value:(Rrd.VT_Int64 stat.rx_bytes) ~ty:Rrd.Derive - ~min:0.0 ~default:true () - ) - :: ( Rrd.Host - , Ds.ds_make ~name:(pif_name ^ "_tx") - ~description: - ("Bytes per second sent on physical interface " ^ dev) - ~units:"B/s" ~value:(Rrd.VT_Int64 stat.tx_bytes) - ~ty:Rrd.Derive ~min:0.0 ~default:true () - ) - :: ( Rrd.Host - , Ds.ds_make ~name:(pif_name ^ "_rx_errors") - ~description: - ("Receive errors per second on physical 
interface " ^ dev) - ~units:"err/s" ~value:(Rrd.VT_Int64 stat.rx_errors) - ~ty:Rrd.Derive ~min:0.0 ~default:false () - ) - :: ( Rrd.Host - , Ds.ds_make ~name:(pif_name ^ "_tx_errors") - ~description: - ("Transmit errors per second on physical interface " ^ dev) - ~units:"err/s" ~value:(Rrd.VT_Int64 stat.tx_errors) - ~ty:Rrd.Derive ~min:0.0 ~default:false () - ) - :: dss - , Int64.add stat.rx_bytes sum_rx - , Int64.add stat.tx_bytes sum_tx - ) - else - ( ( try - let d1, d2 = - Scanf.sscanf dev "vif%d.%d" (fun d1 d2 -> (d1, d2)) - in - let vif_name = Printf.sprintf "vif_%d" d2 in - (* Note: rx and tx are the wrong way round because from dom0 we - see the vms backwards *) - let uuid = uuid_of_domid doms d1 in - ( Rrd.VM uuid - , Ds.ds_make ~name:(vif_name ^ "_tx") ~units:"B/s" - ~description: - ("Bytes per second transmitted on virtual interface \ - number '" - ^ string_of_int d2 - ^ "'" - ) - ~value:(Rrd.VT_Int64 stat.rx_bytes) ~ty:Rrd.Derive ~min:0.0 - ~default:true () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:(vif_name ^ "_rx") ~units:"B/s" - ~description: - ("Bytes per second received on virtual interface \ - number '" - ^ string_of_int d2 - ^ "'" - ) - ~value:(Rrd.VT_Int64 stat.tx_bytes) ~ty:Rrd.Derive - ~min:0.0 ~default:true () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:(vif_name ^ "_rx_errors") ~units:"err/s" - ~description: - ("Receive errors per second on virtual interface \ - number '" - ^ string_of_int d2 - ^ "'" - ) - ~value:(Rrd.VT_Int64 stat.tx_errors) ~ty:Rrd.Derive - ~min:0.0 ~default:false () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:(vif_name ^ "_tx_errors") ~units:"err/s" - ~description: - ("Transmit errors per second on virtual interface \ - number '" - ^ string_of_int d2 - ^ "'" - ) - ~value:(Rrd.VT_Int64 stat.rx_errors) ~ty:Rrd.Derive - ~min:0.0 ~default:false () - ) - :: dss - with _ -> dss - ) - , sum_rx - , sum_tx - ) - ) - ([], 0L, 0L) stats - in - [ - ( Rrd.Host - , Ds.ds_make ~name:"pif_aggr_rx" - ~description:"Bytes per second received on all physical interfaces" - ~units:"B/s" ~value:(Rrd.VT_Int64 sum_rx) ~ty:Rrd.Derive ~min:0.0 - ~default:true () - ) - ; ( Rrd.Host - , Ds.ds_make ~name:"pif_aggr_tx" - ~description:"Bytes per second sent on all physical interfaces" - ~units:"B/s" ~value:(Rrd.VT_Int64 sum_tx) ~ty:Rrd.Derive ~min:0.0 - ~default:true () - ) - ] - @ dss - (*****************************************************) (* memory stats *) (*****************************************************) @@ -834,7 +703,6 @@ let dom0_stat_generators = ; ("vcpus", fun xc _ domains -> dss_vcpus xc domains) ; ("loadavg", fun _ _ _ -> dss_loadavg ()) ; ("hostload", fun xc _ domains -> dss_hostload xc domains) - ; ("netdev", fun _ _ domains -> dss_netdev domains) ; ("cache", fun _ timestamp _ -> dss_cache timestamp) ] diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/dune b/ocaml/xcp-rrdd/bin/rrdp-netdev/dune new file mode 100644 index 00000000000..7c538027368 --- /dev/null +++ b/ocaml/xcp-rrdd/bin/rrdp-netdev/dune @@ -0,0 +1,22 @@ +(executable + (modes exe) + (name rrdp_netdev) + (libraries + astring + rrdd-plugin + rrdd-plugin.xenctrl + rrdd_plugins_libs + xapi-idl + xapi-idl.network + xapi-idl.rrd + xapi-log + xapi-rrd + xenctrl + ) +) + +(install + (package xapi) + (files (rrdp_netdev.exe as xcp-rrdd-plugins/xcp-rrdd-netdev)) + (section libexec_root) +) diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml new file mode 100644 index 00000000000..55be1e88a0b --- /dev/null +++ 
b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml @@ -0,0 +1,157 @@ +(* + * Copyright (C) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +open Rrdd_plugin + +module D = Debug.Make (struct let name = "xcp-rrdp-netdev" end) + +module Process = Rrdd_plugin.Process (struct let name = "xcp-rrdd-netdev" end) + +let generate_netdev_dss doms () = + let uuid_of_domid domains domid = + let _, uuid, _ = + try List.find (fun (_, _, domid') -> domid = domid') domains + with Not_found -> + failwith + (Printf.sprintf "Failed to find uuid corresponding to domid: %d" domid) + in + uuid + in + let open Network_stats in + let stats = Network_stats.read_stats () in + let dss, sum_rx, sum_tx = + List.fold_left + (fun (dss, sum_rx, sum_tx) (dev, stat) -> + if not Astring.String.(is_prefix ~affix:"vif" dev) then + let pif_name = "pif_" ^ dev in + ( ( Rrd.Host + , Ds.ds_make ~name:(pif_name ^ "_rx") + ~description: + ("Bytes per second received on physical interface " ^ dev) + ~units:"B/s" ~value:(Rrd.VT_Int64 stat.rx_bytes) ~ty:Rrd.Derive + ~min:0.0 ~default:true () + ) + :: ( Rrd.Host + , Ds.ds_make ~name:(pif_name ^ "_tx") + ~description: + ("Bytes per second sent on physical interface " ^ dev) + ~units:"B/s" ~value:(Rrd.VT_Int64 stat.tx_bytes) + ~ty:Rrd.Derive ~min:0.0 ~default:true () + ) + :: ( Rrd.Host + , Ds.ds_make ~name:(pif_name ^ "_rx_errors") + ~description: + ("Receive errors per second on physical interface " ^ dev) + ~units:"err/s" ~value:(Rrd.VT_Int64 stat.rx_errors) + ~ty:Rrd.Derive ~min:0.0 ~default:false () + ) + :: ( Rrd.Host + , Ds.ds_make ~name:(pif_name ^ "_tx_errors") + ~description: + ("Transmit errors per second on physical interface " ^ dev) + ~units:"err/s" ~value:(Rrd.VT_Int64 stat.tx_errors) + ~ty:Rrd.Derive ~min:0.0 ~default:false () + ) + :: dss + , Int64.add stat.rx_bytes sum_rx + , Int64.add stat.tx_bytes sum_tx + ) + else + ( ( try + let d1, d2 = + Scanf.sscanf dev "vif%d.%d" (fun d1 d2 -> (d1, d2)) + in + let vif_name = Printf.sprintf "vif_%d" d2 in + (* Note: rx and tx are the wrong way round because from dom0 we + see the vms backwards *) + let uuid = uuid_of_domid doms d1 in + ( Rrd.VM uuid + , Ds.ds_make ~name:(vif_name ^ "_tx") ~units:"B/s" + ~description: + ("Bytes per second transmitted on virtual interface \ + number '" + ^ string_of_int d2 + ^ "'" + ) + ~value:(Rrd.VT_Int64 stat.rx_bytes) ~ty:Rrd.Derive ~min:0.0 + ~default:true () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:(vif_name ^ "_rx") ~units:"B/s" + ~description: + ("Bytes per second received on virtual interface \ + number '" + ^ string_of_int d2 + ^ "'" + ) + ~value:(Rrd.VT_Int64 stat.tx_bytes) ~ty:Rrd.Derive + ~min:0.0 ~default:true () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:(vif_name ^ "_rx_errors") ~units:"err/s" + ~description: + ("Receive errors per second on virtual interface \ + number '" + ^ string_of_int d2 + ^ "'" + ) + ~value:(Rrd.VT_Int64 stat.tx_errors) ~ty:Rrd.Derive + ~min:0.0 ~default:false () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:(vif_name ^ "_tx_errors") ~units:"err/s" + 
~description: + ("Transmit errors per second on virtual interface \ + number '" + ^ string_of_int d2 + ^ "'" + ) + ~value:(Rrd.VT_Int64 stat.rx_errors) ~ty:Rrd.Derive + ~min:0.0 ~default:false () + ) + :: dss + with _ -> dss + ) + , sum_rx + , sum_tx + ) + ) + ([], 0L, 0L) stats + in + [ + ( Rrd.Host + , Ds.ds_make ~name:"pif_aggr_rx" + ~description:"Bytes per second received on all physical interfaces" + ~units:"B/s" ~value:(Rrd.VT_Int64 sum_rx) ~ty:Rrd.Derive ~min:0.0 + ~default:true () + ) + ; ( Rrd.Host + , Ds.ds_make ~name:"pif_aggr_tx" + ~description:"Bytes per second sent on all physical interfaces" + ~units:"B/s" ~value:(Rrd.VT_Int64 sum_tx) ~ty:Rrd.Derive ~min:0.0 + ~default:true () + ) + ] + @ dss + +let _ = + Xenctrl.with_intf (fun xc -> + let _, domains, _ = Xenctrl_lib.domain_snapshot xc in + Process.initialise () ; + (* Share one page per virtual NIC - documentation specifies max is 512 *) + let shared_page_count = 512 in + Process.main_loop ~neg_shift:0.5 + ~target:(Reporter.Local shared_page_count) ~protocol:Rrd_interface.V2 + ~dss_f:(generate_netdev_dss domains) + ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.mli b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.mli new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins b/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins index ced7c537254..97846e704e4 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins +++ b/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins @@ -1 +1 @@ -PLUGINS="xcp-rrdd-iostat xcp-rrdd-squeezed xcp-rrdd-xenpm xcp-rrdd-dcmi" +PLUGINS="xcp-rrdd-iostat xcp-rrdd-squeezed xcp-rrdd-xenpm xcp-rrdd-dcmi xcp-rrdd-netdev" diff --git a/scripts/xe-toolstack-restart b/scripts/xe-toolstack-restart index 32ee88609c5..1ceeeddbe5b 100755 --- a/scripts/xe-toolstack-restart +++ b/scripts/xe-toolstack-restart @@ -29,6 +29,7 @@ POOLCONF=`cat @ETCXENDIR@/pool.conf` if [ $POOLCONF == "master" ]; then MPATHALERT="mpathalert"; else MPATHALERT=""; fi SERVICES="message-switch perfmon v6d xenopsd xenopsd-xc xenopsd-xenlight xenopsd-simulator xenopsd-libvirt xcp-rrdd-iostat xcp-rrdd-squeezed + xcp-rrdd-netdev xcp-rrdd-xenpm xcp-rrdd-gpumon xcp-rrdd xcp-networkd squeezed forkexecd $MPATHALERT xapi-storage-script xapi-clusterd varstored-guard" From b3ea09222b7d203575588294f97abaa017587847 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 24 Sep 2024 14:26:59 +0100 Subject: [PATCH 062/141] IH-615: Move CPU-related data-source collection into a separate RRDD plugin Signed-off-by: Andrii Sultanov --- ocaml/xcp-rrdd/bin/rrdd/rrdd_common.ml | 4 - ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml | 219 ---------------- ocaml/xcp-rrdd/bin/rrdp-cpu/dune | 21 ++ ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.ml | 18 ++ ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.mli | 15 ++ ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml | 246 ++++++++++++++++++ ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.mli | 0 .../bin/rrdp-scripts/sysconfig-rrdd-plugins | 2 +- quality-gate.sh | 2 +- scripts/xe-toolstack-restart | 4 +- 10 files changed, 304 insertions(+), 227 deletions(-) delete mode 100644 ocaml/xcp-rrdd/bin/rrdd/rrdd_common.ml create mode 100644 ocaml/xcp-rrdd/bin/rrdp-cpu/dune create mode 100644 ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.ml create mode 100644 ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.mli create mode 100644 ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml create mode 100644 ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.mli diff --git a/ocaml/xcp-rrdd/bin/rrdd/rrdd_common.ml 
b/ocaml/xcp-rrdd/bin/rrdd/rrdd_common.ml deleted file mode 100644 index dd86dbcf1dd..00000000000 --- a/ocaml/xcp-rrdd/bin/rrdd/rrdd_common.ml +++ /dev/null @@ -1,4 +0,0 @@ -let loadavg () = - let split_colon line = Astring.String.fields ~empty:false line in - let all = Xapi_stdext_unix.Unixext.string_of_file "/proc/loadavg" in - try float_of_string (List.hd (split_colon all)) with _ -> -1. diff --git a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml index 69d55a217d3..d7fec8abbe0 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml @@ -200,221 +200,6 @@ end module Watcher = Watch.WatchXenstore (Meminfo) -(*****************************************************) -(* cpu related code *) -(*****************************************************) - -let xen_flag_complement = Int64.(shift_left 1L 63 |> lognot) - -(* This function is used for getting vcpu stats of the VMs present on this host. *) -let dss_vcpus xc doms = - List.fold_left - (fun dss (dom, uuid, domid) -> - let maxcpus = dom.Xenctrl.max_vcpu_id + 1 in - let rec cpus i dss = - if i >= maxcpus then - dss - else - let vcpuinfo = Xenctrl.domain_get_vcpuinfo xc domid i in - (* Workaround for Xen leaking the flag XEN_RUNSTATE_UPDATE; using a - mask of its complement ~(1 << 63) *) - let cpu_time = - Int64.( - to_float @@ logand vcpuinfo.Xenctrl.cputime xen_flag_complement - ) - in - (* Convert from nanoseconds to seconds *) - let cpu_time = cpu_time /. 1.0e9 in - let cputime_rrd = - ( Rrd.VM uuid - , Ds.ds_make ~name:(Printf.sprintf "cpu%d" i) ~units:"(fraction)" - ~description:(Printf.sprintf "CPU%d usage" i) - ~value:(Rrd.VT_Float cpu_time) ~ty:Rrd.Derive ~default:true - ~min:0.0 ~max:1.0 () - ) - in - cpus (i + 1) (cputime_rrd :: dss) - in - (* Runstate info is per-domain rather than per-vcpu *) - let dss = - let dom_cpu_time = - Int64.(to_float @@ logand dom.Xenctrl.cpu_time xen_flag_complement) - in - let dom_cpu_time = - dom_cpu_time /. (1.0e9 *. float_of_int dom.Xenctrl.nr_online_vcpus) - in - try - let ri = Xenctrl.domain_get_runstate_info xc domid in - ( Rrd.VM uuid - , Ds.ds_make ~name:"runstate_fullrun" ~units:"(fraction)" - ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time0 /. 1.0e9)) - ~description:"Fraction of time that all VCPUs are running" - ~ty:Rrd.Derive ~default:false ~min:0.0 () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:"runstate_full_contention" ~units:"(fraction)" - ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time1 /. 1.0e9)) - ~description: - "Fraction of time that all VCPUs are runnable (i.e., \ - waiting for CPU)" - ~ty:Rrd.Derive ~default:false ~min:0.0 () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:"runstate_concurrency_hazard" - ~units:"(fraction)" - ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time2 /. 1.0e9)) - ~description: - "Fraction of time that some VCPUs are running and some are \ - runnable" - ~ty:Rrd.Derive ~default:false ~min:0.0 () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:"runstate_blocked" ~units:"(fraction)" - ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time3 /. 1.0e9)) - ~description: - "Fraction of time that all VCPUs are blocked or offline" - ~ty:Rrd.Derive ~default:false ~min:0.0 () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:"runstate_partial_run" ~units:"(fraction)" - ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time4 /. 
1.0e9)) - ~description: - "Fraction of time that some VCPUs are running, and some are \ - blocked" - ~ty:Rrd.Derive ~default:false ~min:0.0 () - ) - :: ( Rrd.VM uuid - , Ds.ds_make ~name:"runstate_partial_contention" - ~units:"(fraction)" - ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time5 /. 1.0e9)) - ~description: - "Fraction of time that some VCPUs are runnable and some are \ - blocked" - ~ty:Rrd.Derive ~default:false ~min:0.0 () - ) - :: ( Rrd.VM uuid - , Ds.ds_make - ~name:(Printf.sprintf "cpu_usage") - ~units:"(fraction)" - ~description:(Printf.sprintf "Domain CPU usage") - ~value:(Rrd.VT_Float dom_cpu_time) ~ty:Rrd.Derive ~default:true - ~min:0.0 ~max:1.0 () - ) - :: dss - with _ -> dss - in - try cpus 0 dss with _ -> dss - ) - [] doms - -let physcpus = ref [||] - -let dss_pcpus xc = - let len = Array.length !physcpus in - let newinfos = - if len = 0 then ( - let physinfo = Xenctrl.physinfo xc in - let pcpus = physinfo.Xenctrl.nr_cpus in - physcpus := if pcpus > 0 then Array.make pcpus 0L else [||] ; - Xenctrl.pcpu_info xc pcpus - ) else - Xenctrl.pcpu_info xc len - in - let dss, len_newinfos = - Array.fold_left - (fun (acc, i) v -> - ( ( Rrd.Host - , Ds.ds_make ~name:(Printf.sprintf "cpu%d" i) ~units:"(fraction)" - ~description:("Physical cpu usage for cpu " ^ string_of_int i) - ~value:(Rrd.VT_Float (Int64.to_float v /. 1.0e9)) - ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true - ~transform:(fun x -> 1.0 -. x) - () - ) - :: acc - , i + 1 - ) - ) - ([], 0) newinfos - in - let sum_array = Array.fold_left (fun acc v -> Int64.add acc v) 0L newinfos in - let avg_array = Int64.to_float sum_array /. float_of_int len_newinfos in - let avgcpu_ds = - ( Rrd.Host - , Ds.ds_make ~name:"cpu_avg" ~units:"(fraction)" - ~description:"Average physical cpu usage" - ~value:(Rrd.VT_Float (avg_array /. 1.0e9)) - ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true - ~transform:(fun x -> 1.0 -. x) - () - ) - in - avgcpu_ds :: dss - -let dss_loadavg () = - [ - ( Rrd.Host - , Ds.ds_make ~name:"loadavg" ~units:"(fraction)" - ~description:"Domain0 loadavg" - ~value:(Rrd.VT_Float (Rrdd_common.loadavg ())) - ~ty:Rrd.Gauge ~default:true () - ) - ] - -let count_power_state_running_domains domains = - List.fold_left - (fun count (dom, _, _) -> - if not dom.Xenctrl.paused then count + 1 else count - ) - 0 domains - -let dss_hostload xc domains = - let physinfo = Xenctrl.physinfo xc in - let pcpus = physinfo.Xenctrl.nr_cpus in - let rec sum acc n f = - match n with n when n >= 0 -> sum (acc + f n) (n - 1) f | _ -> acc - in - let load = - List.fold_left - (fun acc (dom, _, domid) -> - sum 0 dom.Xenctrl.max_vcpu_id (fun id -> - let vcpuinfo = Xenctrl.domain_get_vcpuinfo xc domid id in - if vcpuinfo.Xenctrl.online && not vcpuinfo.Xenctrl.blocked then - 1 - else - 0 - ) - + acc - ) - 0 domains - in - let running_domains = count_power_state_running_domains domains in - - let load_per_cpu = float_of_int load /. float_of_int pcpus in - [ - ( Rrd.Host - , Ds.ds_make ~name:"hostload" ~units:"(fraction)" - ~description: - ("Host load per physical cpu, where load refers to " - ^ "the number of vCPU(s) in running or runnable status." 
- ) - ~value:(Rrd.VT_Float load_per_cpu) ~min:0.0 ~ty:Rrd.Gauge ~default:true - () - ) - ; ( Rrd.Host - , Ds.ds_make ~name:"running_vcpus" ~units:"count" - ~description:"The total number of running vCPUs per host" - ~value:(Rrd.VT_Int64 (Int64.of_int load)) - ~min:0.0 ~ty:Rrd.Gauge ~default:true () - ) - ; ( Rrd.Host - , Ds.ds_make ~name:"running_domains" ~units:"count" - ~description:"The total number of running domains per host" - ~value:(Rrd.VT_Int64 (Int64.of_int running_domains)) - ~min:0.0 ~ty:Rrd.Gauge ~default:true () - ) - ] - (*****************************************************) (* memory stats *) (*****************************************************) @@ -699,10 +484,6 @@ let dom0_stat_generators = ("ha", fun _ _ _ -> Rrdd_ha_stats.all ()) ; ("mem_host", fun xc _ _ -> dss_mem_host xc) ; ("mem_vms", fun _ _ domains -> dss_mem_vms domains) - ; ("pcpus", fun xc _ _ -> dss_pcpus xc) - ; ("vcpus", fun xc _ domains -> dss_vcpus xc domains) - ; ("loadavg", fun _ _ _ -> dss_loadavg ()) - ; ("hostload", fun xc _ domains -> dss_hostload xc domains) ; ("cache", fun _ timestamp _ -> dss_cache timestamp) ] diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/dune b/ocaml/xcp-rrdd/bin/rrdp-cpu/dune new file mode 100644 index 00000000000..b654417bf0a --- /dev/null +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/dune @@ -0,0 +1,21 @@ +(executable + (modes exe) + (name rrdp_cpu) + (libraries + astring + rrdd-plugin + rrdd-plugin.xenctrl + rrdd_plugins_libs + xapi-idl.rrd + xapi-log + xapi-rrd + xapi-stdext-unix + xenctrl + ) +) + +(install + (package xapi) + (files (rrdp_cpu.exe as xcp-rrdd-plugins/xcp-rrdd-cpu)) + (section libexec_root) +) diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.ml b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.ml new file mode 100644 index 00000000000..ec60aadc043 --- /dev/null +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.ml @@ -0,0 +1,18 @@ +(* + * Copyright (C) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +let loadavg () = + let split_colon line = Astring.String.fields ~empty:false line in + let all = Xapi_stdext_unix.Unixext.string_of_file "/proc/loadavg" in + try float_of_string (List.hd (split_colon all)) with _ -> -1. diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.mli b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.mli new file mode 100644 index 00000000000..dc460df1be7 --- /dev/null +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdd_common.mli @@ -0,0 +1,15 @@ +(* + * Copyright (C) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ *) + +val loadavg : unit -> float diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml new file mode 100644 index 00000000000..8faf484f2b0 --- /dev/null +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml @@ -0,0 +1,246 @@ +(* + * Copyright (C) Cloud Software Group + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +open Rrdd_plugin + +module D = Debug.Make (struct let name = "xcp-rrdp-cpu" end) + +module Process = Rrdd_plugin.Process (struct let name = "xcp-rrdd-cpu" end) + +let xen_flag_complement = Int64.(shift_left 1L 63 |> lognot) + +(* This function is used for getting vcpu stats of the VMs present on this host. *) +let dss_vcpus xc doms = + List.fold_left + (fun dss (dom, uuid, domid) -> + let maxcpus = dom.Xenctrl.max_vcpu_id + 1 in + let rec cpus i dss = + if i >= maxcpus then + dss + else + let vcpuinfo = Xenctrl.domain_get_vcpuinfo xc domid i in + (* Workaround for Xen leaking the flag XEN_RUNSTATE_UPDATE; using a + mask of its complement ~(1 << 63) *) + let cpu_time = + Int64.( + to_float @@ logand vcpuinfo.Xenctrl.cputime xen_flag_complement + ) + in + (* Convert from nanoseconds to seconds *) + let cpu_time = cpu_time /. 1.0e9 in + let cputime_rrd = + ( Rrd.VM uuid + , Ds.ds_make ~name:(Printf.sprintf "cpu%d" i) ~units:"(fraction)" + ~description:(Printf.sprintf "CPU%d usage" i) + ~value:(Rrd.VT_Float cpu_time) ~ty:Rrd.Derive ~default:true + ~min:0.0 ~max:1.0 () + ) + in + cpus (i + 1) (cputime_rrd :: dss) + in + (* Runstate info is per-domain rather than per-vcpu *) + let dss = + let dom_cpu_time = + Int64.(to_float @@ logand dom.Xenctrl.cpu_time xen_flag_complement) + in + let dom_cpu_time = + dom_cpu_time /. (1.0e9 *. float_of_int dom.Xenctrl.nr_online_vcpus) + in + try + let ri = Xenctrl.domain_get_runstate_info xc domid in + ( Rrd.VM uuid + , Ds.ds_make ~name:"runstate_fullrun" ~units:"(fraction)" + ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time0 /. 1.0e9)) + ~description:"Fraction of time that all VCPUs are running" + ~ty:Rrd.Derive ~default:false ~min:0.0 () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:"runstate_full_contention" ~units:"(fraction)" + ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time1 /. 1.0e9)) + ~description: + "Fraction of time that all VCPUs are runnable (i.e., \ + waiting for CPU)" + ~ty:Rrd.Derive ~default:false ~min:0.0 () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:"runstate_concurrency_hazard" + ~units:"(fraction)" + ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time2 /. 1.0e9)) + ~description: + "Fraction of time that some VCPUs are running and some are \ + runnable" + ~ty:Rrd.Derive ~default:false ~min:0.0 () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:"runstate_blocked" ~units:"(fraction)" + ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time3 /. 
1.0e9)) + ~description: + "Fraction of time that all VCPUs are blocked or offline" + ~ty:Rrd.Derive ~default:false ~min:0.0 () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:"runstate_partial_run" ~units:"(fraction)" + ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time4 /. 1.0e9)) + ~description: + "Fraction of time that some VCPUs are running, and some are \ + blocked" + ~ty:Rrd.Derive ~default:false ~min:0.0 () + ) + :: ( Rrd.VM uuid + , Ds.ds_make ~name:"runstate_partial_contention" + ~units:"(fraction)" + ~value:(Rrd.VT_Float (Int64.to_float ri.Xenctrl.time5 /. 1.0e9)) + ~description: + "Fraction of time that some VCPUs are runnable and some are \ + blocked" + ~ty:Rrd.Derive ~default:false ~min:0.0 () + ) + :: ( Rrd.VM uuid + , Ds.ds_make + ~name:(Printf.sprintf "cpu_usage") + ~units:"(fraction)" + ~description:(Printf.sprintf "Domain CPU usage") + ~value:(Rrd.VT_Float dom_cpu_time) ~ty:Rrd.Derive ~default:true + ~min:0.0 ~max:1.0 () + ) + :: dss + with _ -> dss + in + try cpus 0 dss with _ -> dss + ) + [] doms + +let physcpus = ref [||] + +let dss_pcpus xc = + let len = Array.length !physcpus in + let newinfos = + if len = 0 then ( + let physinfo = Xenctrl.physinfo xc in + let pcpus = physinfo.Xenctrl.nr_cpus in + physcpus := if pcpus > 0 then Array.make pcpus 0L else [||] ; + Xenctrl.pcpu_info xc pcpus + ) else + Xenctrl.pcpu_info xc len + in + let dss, len_newinfos = + Array.fold_left + (fun (acc, i) v -> + ( ( Rrd.Host + , Ds.ds_make ~name:(Printf.sprintf "cpu%d" i) ~units:"(fraction)" + ~description:("Physical cpu usage for cpu " ^ string_of_int i) + ~value:(Rrd.VT_Float (Int64.to_float v /. 1.0e9)) + ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true + ~transform:(fun x -> 1.0 -. x) + () + ) + :: acc + , i + 1 + ) + ) + ([], 0) newinfos + in + let sum_array = Array.fold_left (fun acc v -> Int64.add acc v) 0L newinfos in + let avg_array = Int64.to_float sum_array /. float_of_int len_newinfos in + let avgcpu_ds = + ( Rrd.Host + , Ds.ds_make ~name:"cpu_avg" ~units:"(fraction)" + ~description:"Average physical cpu usage" + ~value:(Rrd.VT_Float (avg_array /. 1.0e9)) + ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true + ~transform:(fun x -> 1.0 -. x) + () + ) + in + avgcpu_ds :: dss + +let dss_loadavg () = + [ + ( Rrd.Host + , Ds.ds_make ~name:"loadavg" ~units:"(fraction)" + ~description:"Domain0 loadavg" + ~value:(Rrd.VT_Float (Rrdd_common.loadavg ())) + ~ty:Rrd.Gauge ~default:true () + ) + ] + +let count_power_state_running_domains domains = + List.fold_left + (fun count (dom, _, _) -> + if not dom.Xenctrl.paused then count + 1 else count + ) + 0 domains + +let dss_hostload xc domains = + let physinfo = Xenctrl.physinfo xc in + let pcpus = physinfo.Xenctrl.nr_cpus in + let rec sum acc n f = + match n with n when n >= 0 -> sum (acc + f n) (n - 1) f | _ -> acc + in + let load = + List.fold_left + (fun acc (dom, _, domid) -> + sum 0 dom.Xenctrl.max_vcpu_id (fun id -> + let vcpuinfo = Xenctrl.domain_get_vcpuinfo xc domid id in + if vcpuinfo.Xenctrl.online && not vcpuinfo.Xenctrl.blocked then + 1 + else + 0 + ) + + acc + ) + 0 domains + in + let running_domains = count_power_state_running_domains domains in + + let load_per_cpu = float_of_int load /. float_of_int pcpus in + [ + ( Rrd.Host + , Ds.ds_make ~name:"hostload" ~units:"(fraction)" + ~description: + ("Host load per physical cpu, where load refers to " + ^ "the number of vCPU(s) in running or runnable status." 
+ ) + ~value:(Rrd.VT_Float load_per_cpu) ~min:0.0 ~ty:Rrd.Gauge ~default:true + () + ) + ; ( Rrd.Host + , Ds.ds_make ~name:"running_vcpus" ~units:"count" + ~description:"The total number of running vCPUs per host" + ~value:(Rrd.VT_Int64 (Int64.of_int load)) + ~min:0.0 ~ty:Rrd.Gauge ~default:true () + ) + ; ( Rrd.Host + , Ds.ds_make ~name:"running_domains" ~units:"count" + ~description:"The total number of running domains per host" + ~value:(Rrd.VT_Int64 (Int64.of_int running_domains)) + ~min:0.0 ~ty:Rrd.Gauge ~default:true () + ) + ] + +let generate_cpu_ds_list xc domains () = + dss_pcpus xc @ dss_vcpus xc domains @ dss_loadavg () @ dss_hostload xc domains + +let _ = + Xenctrl.with_intf (fun xc -> + let _, domains, _ = Xenctrl_lib.domain_snapshot xc in + Process.initialise () ; + (* Share one page per PCPU and dom each *) + let physinfo = Xenctrl.physinfo xc in + let shared_page_count = physinfo.Xenctrl.nr_cpus + List.length domains in + + Process.main_loop ~neg_shift:0.5 + ~target:(Reporter.Local shared_page_count) ~protocol:Rrd_interface.V2 + ~dss_f:(generate_cpu_ds_list xc domains) + ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.mli b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.mli new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins b/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins index 97846e704e4..e0650a06dcd 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins +++ b/ocaml/xcp-rrdd/bin/rrdp-scripts/sysconfig-rrdd-plugins @@ -1 +1 @@ -PLUGINS="xcp-rrdd-iostat xcp-rrdd-squeezed xcp-rrdd-xenpm xcp-rrdd-dcmi xcp-rrdd-netdev" +PLUGINS="xcp-rrdd-iostat xcp-rrdd-squeezed xcp-rrdd-xenpm xcp-rrdd-dcmi xcp-rrdd-netdev xcp-rrdd-cpu" diff --git a/quality-gate.sh b/quality-gate.sh index 47e97fa37e2..11a6dee143a 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=509 + N=508 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) diff --git a/scripts/xe-toolstack-restart b/scripts/xe-toolstack-restart index 1ceeeddbe5b..25856dc67ad 100755 --- a/scripts/xe-toolstack-restart +++ b/scripts/xe-toolstack-restart @@ -18,7 +18,7 @@ LOCKFILE='/dev/shm/xe_toolstack_restart.lock' ( flock -x -n 200 -if [ "$?" != 0 ]; then +if [ "$?" != 0 ]; then echo "Exiting: cannot lock $LOCKFILE. Is an instance of $0 running already?" 
exit 1 fi @@ -29,7 +29,7 @@ POOLCONF=`cat @ETCXENDIR@/pool.conf` if [ $POOLCONF == "master" ]; then MPATHALERT="mpathalert"; else MPATHALERT=""; fi SERVICES="message-switch perfmon v6d xenopsd xenopsd-xc xenopsd-xenlight xenopsd-simulator xenopsd-libvirt xcp-rrdd-iostat xcp-rrdd-squeezed - xcp-rrdd-netdev + xcp-rrdd-netdev xcp-rrdd-cpu xcp-rrdd-xenpm xcp-rrdd-gpumon xcp-rrdd xcp-networkd squeezed forkexecd $MPATHALERT xapi-storage-script xapi-clusterd varstored-guard" From aa996553bc6cf4dfe7a85afaaaa4106d481f9e0c Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 25 Sep 2024 10:57:10 +0100 Subject: [PATCH 063/141] rrdp-iostat: Use a shared Xenctrl get_doms_stat function Signed-off-by: Andrii Sultanov --- ocaml/xcp-rrdd/bin/rrdp-iostat/dune | 3 +- ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml | 49 +++---------------- 2 files changed, 8 insertions(+), 44 deletions(-) diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune index 4721f71aed1..3880709282a 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune @@ -4,12 +4,13 @@ (libraries astring cstruct - + ezxenstore.core inotify mtime mtime.clock.os rrdd-plugin + rrdd-plugin.xenctrl rrdd_plugins_libs str stringext diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml b/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml index b8c60edec7e..1502a07f9fa 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml @@ -22,41 +22,6 @@ module Process = Process (struct let name = "xcp-rrdd-iostat" end) open Process open Ezxenstore_core.Xenstore -let with_xc_and_xs f = Xenctrl.with_intf (fun xc -> with_xs (fun xs -> f xc xs)) - -(* Return a list of (domid, uuid) pairs for domUs running on this host *) -let get_running_domUs xc xs = - let metadata_of_domain di = - let open Xenctrl in - let domid = di.domid in - let ( let* ) = Option.bind in - let* uuid_raw = Uuidx.of_int_array di.handle in - let uuid = Uuidx.to_string uuid_raw in - - (* Actively hide migrating VM uuids, these are temporary and xenops - writes the original and the final uuid to xenstore *) - let uuid_from_key key = - let path = Printf.sprintf "/vm/%s/%s" uuid key in - try xs.read path - with Xs_protocol.Enoent _hint -> - D.info "Couldn't read path %s; falling back to actual uuid" path ; - uuid - in - let stable_uuid = Option.fold ~none:uuid ~some:uuid_from_key in - - let key = - if Astring.String.is_suffix ~affix:"000000000000" uuid then - Some "origin-uuid" - else if Astring.String.is_suffix ~affix:"000000000001" uuid then - Some "final-uuid" - else - None - in - Some (domid, stable_uuid key) - in - (* Do not list dom0 *) - Xenctrl.domain_getinfolist xc 1 |> List.filter_map metadata_of_domain - (* A mapping of VDIs to the VMs they are plugged to, in which position, and the device-id *) let vdi_to_vm_map : (string * (string * string * int)) list ref = ref [] @@ -71,11 +36,11 @@ let update_vdi_to_vm_map () = ["/local/domain/0/backend/vbd"; "/local/domain/0/backend/vbd3"] in try - let domUs = with_xc_and_xs get_running_domUs in + let _, domUs, _ = Xenctrl.with_intf Xenctrl_lib.domain_snapshot in D.debug "Running domUs: [%s]" (String.concat "; " (List.map - (fun (domid, uuid) -> + (fun (_, uuid, domid) -> Printf.sprintf "%d (%s)" domid (String.sub uuid 0 8) ) domUs @@ -83,7 +48,7 @@ let update_vdi_to_vm_map () = ) ; with_xs (fun xs -> List.map - (fun (domid, vm) -> + (fun (_, vm, domid) -> (* Get VBDs for this domain *) let enoents = ref 0 in let vbds = @@ -981,18 
+946,16 @@ let gen_metrics () = in (* relations between dom-id, vm-uuid, device pos, dev-id, etc *) - let domUs = with_xc_and_xs get_running_domUs in + let _, domUs, _ = Xenctrl.with_intf Xenctrl_lib.domain_snapshot in let vdi_to_vm = get_vdi_to_vm_map () in let get_stats_blktap3_by_vdi vdi = if List.mem_assoc vdi vdi_to_vm then let vm_uuid, _pos, devid = List.assoc vdi vdi_to_vm in - match - List.filter (fun (_domid', vm_uuid') -> vm_uuid' = vm_uuid) domUs - with + match List.filter (fun (_, vm_uuid', _) -> vm_uuid' = vm_uuid) domUs with | [] -> (None, None) - | (domid, _vm_uuid) :: _ -> + | (_, _, domid) :: _ -> let find_blktap3 blktap3_assoc_list = let key = (domid, devid) in if List.mem_assoc key blktap3_assoc_list then From 6bf1d7407c4167e234b8802a19221b973b73ebd0 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 19 Sep 2024 07:59:56 +0100 Subject: [PATCH 064/141] Update docs about RRDD plugins Signed-off-by: Andrii Sultanov --- doc/content/toolstack/high-level/daemons.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/content/toolstack/high-level/daemons.md b/doc/content/toolstack/high-level/daemons.md index 103798bb0d5..bb1d7607fff 100644 --- a/doc/content/toolstack/high-level/daemons.md +++ b/doc/content/toolstack/high-level/daemons.md @@ -20,6 +20,9 @@ xcp-rrdd - xcp-rrdd-iostat - xcp-rrdd-squeezed - xcp-rrdd-xenpm + - xcp-rrdd-dcmi + - xcp-rrdd-netdev + - xcp-rrdd-cpu xcp-networkd : a host network manager which takes care of configuring interfaces, bridges From aa631b9e9b8976e6b41bb567fc9736e82fb9ef47 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 25 Sep 2024 13:34:38 +0100 Subject: [PATCH 065/141] rrdd: Add .service and config files Signed-off-by: Andrii Sultanov --- scripts/Makefile | 12 ++++++++++++ scripts/xcp-rrdd-conf | 6 ++++++ scripts/xcp-rrdd-cpu.service | 15 +++++++++++++++ scripts/xcp-rrdd-dcmi.service | 15 +++++++++++++++ scripts/xcp-rrdd-iostat.service | 15 +++++++++++++++ scripts/xcp-rrdd-netdev.service | 15 +++++++++++++++ scripts/xcp-rrdd-squeezed.service | 15 +++++++++++++++ scripts/xcp-rrdd-sysconfig | 3 +++ scripts/xcp-rrdd-tmp | 1 + scripts/xcp-rrdd-xenpm.service | 15 +++++++++++++++ scripts/xcp-rrdd.service | 21 +++++++++++++++++++++ 11 files changed, 133 insertions(+) create mode 100644 scripts/xcp-rrdd-conf create mode 100644 scripts/xcp-rrdd-cpu.service create mode 100644 scripts/xcp-rrdd-dcmi.service create mode 100644 scripts/xcp-rrdd-iostat.service create mode 100644 scripts/xcp-rrdd-netdev.service create mode 100644 scripts/xcp-rrdd-squeezed.service create mode 100644 scripts/xcp-rrdd-sysconfig create mode 100644 scripts/xcp-rrdd-tmp create mode 100644 scripts/xcp-rrdd-xenpm.service create mode 100644 scripts/xcp-rrdd.service diff --git a/scripts/Makefile b/scripts/Makefile index 4c04da3943c..7583c80d624 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -74,11 +74,23 @@ install: $(IDATA) xapi-nbd.path $(DESTDIR)/usr/lib/systemd/system/xapi-nbd.path $(IDATA) 10-stunnel-increase-number-of-file-descriptors.conf $(DESTDIR)/etc/systemd/system/stunnel@xapi.service.d/10-stunnel-increase-number-of-file-descriptors.conf $(IDATA) 11-stunnel-gencert.conf $(DESTDIR)/etc/systemd/system/stunnel@xapi.service.d/11-stunnel-gencert.conf + $(IDATA) xcp-rrdd.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd.service + $(IDATA) xcp-rrdd-xenpm.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-xenpm.service + $(IDATA) xcp-rrdd-iostat.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-iostat.service + $(IDATA) xcp-rrdd-squeezed.service 
$(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-squeezed.service + $(IDATA) xcp-rrdd-squeezed.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-squeezed.service + $(IDATA) xcp-rrdd-dcmi.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-dcmi.service + $(IDATA) xcp-rrdd-cpu.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-cpu.service + $(IDATA) xcp-rrdd-netdev.service $(DESTDIR)/usr/lib/systemd/system/xcp-rrdd-netdev.service mkdir -p $(DESTDIR)$(ETCXENDIR)/master.d $(IPROG) on-master-start $(DESTDIR)$(ETCXENDIR)/master.d/01-example $(IPROG) mpathalert-daemon $(DESTDIR)$(ETCXENDIR)/master.d/03-mpathalert-daemon mkdir -p $(DESTDIR)/etc/sysconfig $(IPROG) sysconfig-xapi $(DESTDIR)/etc/sysconfig/xapi + $(IPROG) xcp-rrdd-sysconfig $(DESTDIR)/etc/sysconfig/xcp-rrdd + $(IPROG) xcp-rrdd-conf $(DESTDIR)/etc/xcp-rrdd.conf + mkdir -p $(DESTDIR)/usr/lib/tmpfiles.d + $(IPROG) xcp-rrdd-tmp $(DESTDIR)/usr/lib/tmpfiles.d/xcp-rrdd.conf $(IPROG) nbd-firewall-config.sh $(DESTDIR)$(LIBEXECDIR) $(IPROG) update-ca-bundle.sh $(DESTDIR)$(OPTDIR)/bin mkdir -p $(DESTDIR)$(OPTDIR)/debug diff --git a/scripts/xcp-rrdd-conf b/scripts/xcp-rrdd-conf new file mode 100644 index 00000000000..5014b73d66e --- /dev/null +++ b/scripts/xcp-rrdd-conf @@ -0,0 +1,6 @@ +# The xcp-rrdd config file + +inventory = /etc/xensource-inventory + +disable-logging-for = http +loglevel = info diff --git a/scripts/xcp-rrdd-cpu.service b/scripts/xcp-rrdd-cpu.service new file mode 100644 index 00000000000..310828dda94 --- /dev/null +++ b/scripts/xcp-rrdd-cpu.service @@ -0,0 +1,15 @@ +[Unit] +Description=XCP RRD daemon CPU plugin +After=xcp-rrdd.service +Requires=xcp-rrdd.service + +[Service] +ExecStart=/opt/xensource/libexec/xcp-rrdd-plugins/xcp-rrdd-cpu +StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] +WantedBy=multi-user.target diff --git a/scripts/xcp-rrdd-dcmi.service b/scripts/xcp-rrdd-dcmi.service new file mode 100644 index 00000000000..64bab4f25b3 --- /dev/null +++ b/scripts/xcp-rrdd-dcmi.service @@ -0,0 +1,15 @@ +[Unit] +Description=XCP RRD daemon IPMI DCMI power plugin +After=xcp-rrdd.service +Requires=xcp-rrdd.service + +[Service] +ExecStart=/opt/xensource/libexec/xcp-rrdd-plugins/xcp-rrdd-dcmi +StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] +WantedBy=multi-user.target diff --git a/scripts/xcp-rrdd-iostat.service b/scripts/xcp-rrdd-iostat.service new file mode 100644 index 00000000000..ce724477367 --- /dev/null +++ b/scripts/xcp-rrdd-iostat.service @@ -0,0 +1,15 @@ +[Unit] +Description=XCP RRD daemon iostat plugin +After=xcp-rrdd.service +Requires=xcp-rrdd.service + +[Service] +ExecStart=/opt/xensource/libexec/xcp-rrdd-plugins/xcp-rrdd-iostat +StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] +WantedBy=multi-user.target diff --git a/scripts/xcp-rrdd-netdev.service b/scripts/xcp-rrdd-netdev.service new file mode 100644 index 00000000000..b961cc9d15c --- /dev/null +++ b/scripts/xcp-rrdd-netdev.service @@ -0,0 +1,15 @@ +[Unit] +Description=XCP RRD daemon network plugin +After=xcp-rrdd.service +Requires=xcp-rrdd.service + +[Service] +ExecStart=/opt/xensource/libexec/xcp-rrdd-plugins/xcp-rrdd-netdev +StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] 
+WantedBy=multi-user.target diff --git a/scripts/xcp-rrdd-squeezed.service b/scripts/xcp-rrdd-squeezed.service new file mode 100644 index 00000000000..bb33fca801c --- /dev/null +++ b/scripts/xcp-rrdd-squeezed.service @@ -0,0 +1,15 @@ +[Unit] +Description=XCP RRD daemon squeezed plugin +After=xcp-rrdd.service +Requires=xcp-rrdd.service + +[Service] +ExecStart=/opt/xensource/libexec/xcp-rrdd-plugins/xcp-rrdd-squeezed +StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] +WantedBy=multi-user.target diff --git a/scripts/xcp-rrdd-sysconfig b/scripts/xcp-rrdd-sysconfig new file mode 100644 index 00000000000..b0c159f9016 --- /dev/null +++ b/scripts/xcp-rrdd-sysconfig @@ -0,0 +1,3 @@ +# Additional options for the XCP RRD deamon. +# XCP_RRDD_OPTIONS= : any extra command-line startup arguments for xcp-rddd +XCP_RRDD_OPTIONS= diff --git a/scripts/xcp-rrdd-tmp b/scripts/xcp-rrdd-tmp new file mode 100644 index 00000000000..b829da2fe3c --- /dev/null +++ b/scripts/xcp-rrdd-tmp @@ -0,0 +1 @@ +d /dev/shm/metrics 0775 root rrdmetrics - diff --git a/scripts/xcp-rrdd-xenpm.service b/scripts/xcp-rrdd-xenpm.service new file mode 100644 index 00000000000..092bb4d4bb9 --- /dev/null +++ b/scripts/xcp-rrdd-xenpm.service @@ -0,0 +1,15 @@ +[Unit] +Description=XCP RRD daemon xenpm plugin +After=xcp-rrdd.service +Requires=xcp-rrdd.service + +[Service] +ExecStart=/opt/xensource/libexec/xcp-rrdd-plugins/xcp-rrdd-xenpm +StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] +WantedBy=multi-user.target diff --git a/scripts/xcp-rrdd.service b/scripts/xcp-rrdd.service new file mode 100644 index 00000000000..81e4d78df68 --- /dev/null +++ b/scripts/xcp-rrdd.service @@ -0,0 +1,21 @@ +[Unit] +Description=XCP RRD daemon +After=forkexecd.service xenstored.service message-switch.service syslog.target +Wants=forkexecd.service xenstored.service message-switch.service syslog.target + +[Service] +Type=notify +Environment="LD_PRELOAD=/usr/lib64/libjemalloc.so.2" +Environment="MALLOC_CONF=narenas:1,tcache:false" +Environment=OCAMLRUNPARAM=b +EnvironmentFile=-/etc/sysconfig/xcp-rrdd +ExecStart=/usr/sbin/xcp-rrdd $XCP_RRDD_OPTIONS +SuccessExitStatus=0 +# StandardError=null +# restart but fail if more than 5 failures in 30s +Restart=on-failure +StartLimitBurst=5 +StartLimitInterval=30s + +[Install] +WantedBy=multi-user.target From 0e914644aa53a1341192f78001856460de5cd783 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 14 Jul 2023 15:56:41 +0100 Subject: [PATCH 066/141] git: ignore another revision (from xapi-storage-script) Signed-off-by: Pau Ruiz Safont --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index d8259ca9cd8..739b485ae74 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -30,6 +30,7 @@ ff39018fd6d91985f9c893a56928771dfe9fa48d cbb9edb17dfd122c591beb14d1275acc39492335 d6ab15362548b8fe270bd14d5153b8d94e1b15c0 b12cf444edea15da6274975e1b2ca6a7fce2a090 +364c27f5d18ab9dd31825e67a93efabecad06823 # ocp-indent d018d26d6acd4707a23288b327b49e44f732725e From c2039ea868456c66ea9c33a17b503aafc809f456 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 14 Jul 2023 17:03:14 +0100 Subject: [PATCH 067/141] IH-397: Do not open Core Replace most of the usages with Stdlib, Sexplib0 or Base The String Hashtables, Sets, and Clocks will be replaced at a later time 
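For context, a minimal sketch of the rewrite style this patch applies throughout main.ml (illustrative only — the lookup and table names are invented for the example and do not appear in the diff; it assumes only the base library is available):

```
(* Illustrative sketch of the Core -> Stdlib/Base conversion, not taken
   from main.ml. *)

(* Core.List.Assoc.find keys key ~equal:String.equal becomes a plain
   Stdlib lookup over an association list. *)
let lookup (keys : (string * string) list) (key : string) : string option =
  List.assoc_opt key keys

(* Core's String.Table.create () becomes a Base hash table keyed by a
   first-class module; set keeps its labelled arguments. *)
let table : (string, int) Base.Hashtbl.t =
  Base.Hashtbl.create (module Base.String)

let () = Base.Hashtbl.set table ~key:"example" ~data:1
```

Association lists move to Stdlib's List.assoc_opt, while hash tables stay labelled but are created from a first-class key module; this is the same pattern the diff below uses in Attached_SRs and Datapath_plugins.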
Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/main.ml | 261 +++++++++++++++--------------- 1 file changed, 133 insertions(+), 128 deletions(-) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index fb4ac093489..6f4309c5930 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -13,10 +13,8 @@ *) module U = Unix module R = Rpc -module B = Backtrace -open Core open Async -open Xapi_storage_script_types +module Types = Xapi_storage_script_types module Plugin_client = Xapi_storage.Plugin.Plugin (Rpc_async.GenClient ()) module Volume_client = Xapi_storage.Control.Volume (Rpc_async.GenClient ()) module Sr_client = Xapi_storage.Control.Sr (Rpc_async.GenClient ()) @@ -43,7 +41,7 @@ let backend_backtrace_error name args backtrace = | ["Activated_on_another_host"; uuid] -> Errors.Activated_on_another_host uuid | _ -> - let backtrace = rpc_of_backtrace backtrace |> Jsonrpc.to_string in + let backtrace = Types.rpc_of_backtrace backtrace |> Jsonrpc.to_string in Errors.Backend_error_with_backtrace (name, backtrace :: args) let missing_uri () = @@ -68,7 +66,7 @@ let return_rpc typ result = (* In practice we'll always get a successful RPC response here (Ok), but we still have to transform the Error to make the types match: *) let result = - Result.map_error result ~f:(fun err -> + Base.Result.map_error result ~f:(fun err -> backend_error "SCRIPT_RETURNED_RPC_ERROR" [Rpcmarshal.marshal typ err |> R.to_string] ) @@ -85,7 +83,7 @@ let return_rpc typ result = return (Error (backend_error "SCRIPT_FAILED" - ["Unexpected exception:" ^ Exn.to_string e] + ["Unexpected exception:" ^ Base.Exn.to_string e] ) ) @@ -124,7 +122,7 @@ let pvs_version = "3.0" let supported_api_versions = [pvs_version; "5.0"] -let api_max = List.fold_left ~f:String.max supported_api_versions ~init:"" +let api_max = List.fold_left Base.String.max "" supported_api_versions let id x = x @@ -143,7 +141,7 @@ end) : sig (** Module for making the inputs and outputs compatible with the old PVS version of the storage scripts. 
*) - type device_config = (Core.String.t, string) Core.List.Assoc.t + type device_config = (string * string) list val compat_out_volume : compat_out (** Add the missing [sharable] field to the Dict in [rpc], to ensure the @@ -170,11 +168,11 @@ end) : sig (** Compatiblity for the old PVS version of SR.attach, which had signature [uri -> sr (=string)] *) end = struct - type device_config = (Core.String.t, string) Core.List.Assoc.t + type device_config = (string * string) list let with_pvs_version f rpc = match !V.version with - | Some v when String.(v = pvs_version) -> + | Some v when Base.String.(v = pvs_version) -> f rpc | _ -> rpc @@ -206,7 +204,7 @@ end = struct let add_fields_to_record_list_output fields = with_pvs_version (function | R.Enum l -> - R.Enum (List.map ~f:(add_fields_to_dict fields) l) + R.Enum (List.map (add_fields_to_dict fields) l) | rpc -> rpc ) @@ -221,8 +219,8 @@ end = struct old PVS scripts *) let compat_uri device_config = match !V.version with - | Some version when String.(version = pvs_version) -> ( - match List.Assoc.find ~equal:String.equal device_config "uri" with + | Some version when Base.String.(version = pvs_version) -> ( + match Base.List.Assoc.find ~equal:String.equal device_config "uri" with | None -> return (Error (missing_uri ())) | Some uri -> @@ -235,7 +233,7 @@ end = struct compat_uri device_config >>>= fun compat_in -> let compat_out = match !V.version with - | Some v when String.(v = pvs_version) -> ( + | Some v when Base.String.(v = pvs_version) -> ( function (* The PVS version will return nothing *) | R.Null -> @@ -254,19 +252,18 @@ end let check_plugin_version_compatible query_result = let Xapi_storage.Plugin.{name; required_api_version; _} = query_result in - if String.(required_api_version <> api_max) then + if Base.String.(required_api_version <> api_max) then warn "Using deprecated SMAPIv3 API version %s, latest is %s. Update your %s \ plugin!" required_api_version api_max name ; - if List.mem ~equal:String.equal supported_api_versions required_api_version - then + if List.mem required_api_version supported_api_versions then Deferred.Result.return () else let msg = Printf.sprintf "%s requires unknown SMAPI API version %s, supported: %s" name required_api_version - (String.concat ~sep:"," supported_api_versions) + (String.concat "," supported_api_versions) in return (Error (Storage_interface.Errors.No_storage_plugin_for_sr msg)) @@ -323,24 +320,26 @@ let is_executable path = module Script = struct (** We cache (lowercase script name -> original script name) mapping for the scripts in the root directory of every registered plugin. 
*) - let name_mapping = String.Table.create ~size:4 () + let name_mapping = Base.Hashtbl.create ~size:4 (module Base.String) let update_mapping ~script_dir = Sys.readdir script_dir >>| Array.to_list >>| fun files -> (* If there are multiple files which map to the same lowercase string, we just take the first one, instead of failing *) let mapping = - List.zip_exn files files - |> String.Caseless.Map.of_alist_reduce ~f:String.min + List.combine files files + |> Base.Map.of_alist_reduce + (module Base.String.Caseless) + ~f:Base.String.min in - Hashtbl.set name_mapping ~key:script_dir ~data:mapping + Base.Hashtbl.set name_mapping ~key:script_dir ~data:mapping let path ~script_dir ~script_name = let find () = let cached_script_name = - let ( >>?= ) = Option.( >>= ) in - Hashtbl.find name_mapping script_dir >>?= fun mapping -> - Core.String.Caseless.Map.find mapping script_name + let ( let* ) = Option.bind in + let* mapping = Base.Hashtbl.find name_mapping script_dir in + Base.Map.find mapping script_name in let script_name = Option.value cached_script_name ~default:script_name in let path = Filename.concat script_dir script_name in @@ -369,10 +368,10 @@ let observer_is_component_enabled () = let is_enabled () = let is_config_file path = Filename.check_suffix path ".observer.conf" in let* files = Sys.readdir observer_config_dir in - return (Array.exists files ~f:is_config_file) + return (Array.exists is_config_file files) in let* result = Monitor.try_with ~extract_exn:true is_enabled in - return (Option.value (Result.ok result) ~default:false) + return (Option.value (Result.to_option result) ~default:false) (** Call the script named after the RPC method in the [script_dir] directory. The arguments (not the whole JSON-RPC call) are passed as JSON @@ -398,7 +397,7 @@ let fork_exec_rpc : fun ~script_dir ?missing ?(compat_in = id) ?(compat_out = id) ?dbg -> let invoke_script call script_name : (R.response, Storage_interface.Errors.error) Deferred.Result.t = - let traceparent = Option.bind dbg ~f:Debug_info.traceparent_of_dbg in + let traceparent = Option.bind dbg Debug_info.traceparent_of_dbg in let args = ["--json"] in let script_name, args, env = match (traceparent, config.use_observer) with @@ -416,10 +415,12 @@ let fork_exec_rpc : in Process.create ~env:(`Extend env) ~prog:script_name ~args () >>= function | Error e -> - error "%s failed: %s" script_name (Error.to_string_hum e) ; + error "%s failed: %s" script_name (Base.Error.to_string_hum e) ; return (Error - (backend_error "SCRIPT_FAILED" [script_name; Error.to_string_hum e]) + (backend_error "SCRIPT_FAILED" + [script_name; Base.Error.to_string_hum e] + ) ) | Ok p -> ( (* Send the request as json on stdin *) @@ -453,7 +454,7 @@ let fork_exec_rpc : | Error (`Exit_non_zero code) -> ( (* Expect an exception and backtrace on stdout *) match - Or_error.try_with (fun () -> + Base.Or_error.try_with (fun () -> Jsonrpc.of_string output.Process.Output.stdout ) with @@ -475,7 +476,9 @@ let fork_exec_rpc : ) ) | Ok response -> ( - match Or_error.try_with (fun () -> error_of_rpc response) with + match + Base.Or_error.try_with (fun () -> Types.error_of_rpc response) + with | Error _ -> error "%s failed and printed bad error json: %s" script_name output.Process.Output.stdout ; @@ -516,7 +519,7 @@ let fork_exec_rpc : (* Parse the json on stdout. 
We get back a JSON-RPC value from the scripts, not a complete JSON-RPC response *) match - Or_error.try_with (fun () -> + Base.Or_error.try_with (fun () -> Jsonrpc.of_string output.Process.Output.stdout ) with @@ -570,7 +573,9 @@ let fork_exec_rpc : error "%s is not executable" path ; return (Error - (backend_error "SCRIPT_NOT_EXECUTABLE" [path; Exn.to_string exn]) + (backend_error "SCRIPT_NOT_EXECUTABLE" + [path; Base.Exn.to_string exn] + ) ) | Ok path -> invoke_script call path @@ -585,38 +590,47 @@ let fork_exec_rpc : let rpc : R.call -> R.response Deferred.t = fun call -> script_rpc call >>= fun result -> - Result.map_error ~f:(fun e -> Fork_exec_error e) result - |> Result.ok_exn + Base.Result.map_error ~f:(fun e -> Fork_exec_error e) result + |> Base.Result.ok_exn |> return in rpc +let string_of_sexp = Sexplib0.Sexp_conv.string_of_sexp + +let sexp_of_string = Sexplib0.Sexp_conv.sexp_of_string + +let list_of_sexp = Sexplib0.Sexp_conv.list_of_sexp + +let sexp_of_list = Sexplib0.Sexp_conv.sexp_of_list + module Attached_SRs = struct type state = {sr: string; uids: string list} [@@deriving sexp] - let sr_table : state String.Table.t ref = ref (String.Table.create ()) + let sr_table : (string, state) Base.Hashtbl.t ref = + ref (Base.Hashtbl.create (module Base.String)) let state_path = ref None let add smapiv2 plugin uids = let key = Storage_interface.Sr.string_of smapiv2 in - Hashtbl.set !sr_table ~key ~data:{sr= plugin; uids} ; + Base.Hashtbl.set !sr_table ~key ~data:{sr= plugin; uids} ; ( match !state_path with | None -> return () | Some path -> let contents = - String.Table.sexp_of_t sexp_of_state !sr_table + Core.String.Table.sexp_of_t sexp_of_state !sr_table |> Sexplib.Sexp.to_string in let dir = Filename.dirname path in - Unix.mkdir ~p:() dir >>= fun () -> Writer.save path ~contents + Async_unix.Unix.mkdir dir >>= fun () -> Async.Writer.save path ~contents ) >>= fun () -> return (Ok ()) let find smapiv2 = let key = Storage_interface.Sr.string_of smapiv2 in - match Hashtbl.find !sr_table key with + match Base.Hashtbl.find !sr_table key with | None -> let open Storage_interface in return (Error (Errors.Sr_not_attached key)) @@ -625,7 +639,7 @@ module Attached_SRs = struct let get_uids smapiv2 = let key = Storage_interface.Sr.string_of smapiv2 in - match Hashtbl.find !sr_table key with + match Base.Hashtbl.find !sr_table key with | None -> let open Storage_interface in return (Error (Errors.Sr_not_attached key)) @@ -634,12 +648,12 @@ module Attached_SRs = struct let remove smapiv2 = let key = Storage_interface.Sr.string_of smapiv2 in - Hashtbl.remove !sr_table key ; + Base.Hashtbl.remove !sr_table key ; return (Ok ()) let list () = let srs = - Hashtbl.fold !sr_table + Base.Hashtbl.fold !sr_table ~f:(fun ~key ~data:_ ac -> Storage_interface.Sr.of_string key :: ac) ~init:[] in @@ -647,7 +661,7 @@ module Attached_SRs = struct let reload path = state_path := Some path ; - Sys.is_file ~follow_symlinks:true path >>= function + Async.Sys.is_file ~follow_symlinks:true path >>= function | `No | `Unknown -> return () | `Yes -> @@ -655,12 +669,12 @@ module Attached_SRs = struct sr_table := contents |> Sexplib.Sexp.of_string - |> String.Table.t_of_sexp state_of_sexp ; + |> Core.String.Table.t_of_sexp state_of_sexp ; return () end module Datapath_plugins = struct - let table = String.Table.create () + let table = Base.Hashtbl.create (module Base.String) let register ~datapath_root datapath_plugin_name = let result = @@ -672,7 +686,7 @@ module Datapath_plugins = struct 
check_plugin_version_compatible response >>= function | Ok () -> info "Registered datapath plugin %s" datapath_plugin_name ; - Hashtbl.set table ~key:datapath_plugin_name + Base.Hashtbl.set table ~key:datapath_plugin_name ~data:(script_dir, response) ; return (Ok ()) | Error e -> @@ -689,23 +703,20 @@ module Datapath_plugins = struct result >>= fun _ -> return () let unregister datapath_plugin_name = - Hashtbl.remove table datapath_plugin_name ; + Base.Hashtbl.remove table datapath_plugin_name ; return () let supports_feature scheme feature = - match Hashtbl.find table scheme with + match Base.Hashtbl.find table scheme with | None -> false | Some (_script_dir, query_result) -> - List.mem query_result.Xapi_storage.Plugin.features feature - ~equal:String.equal + List.mem feature query_result.Xapi_storage.Plugin.features end let vdi_of_volume x = let find key ~default ~of_string = - match - List.Assoc.find x.Xapi_storage.Control.keys key ~equal:String.equal - with + match List.assoc_opt key x.Xapi_storage.Control.keys with | None -> default | Some v -> @@ -739,7 +750,7 @@ let choose_datapath ?(persistent = true) domain response = to name the datapath plugin. *) let possible = List.filter_map - ~f:(fun x -> + (fun x -> let uri = Uri.of_string x in match Uri.scheme uri with | None -> @@ -752,8 +763,8 @@ let choose_datapath ?(persistent = true) domain response = (* We can only use URIs whose schemes correspond to registered plugins *) let possible = List.filter_map - ~f:(fun (scheme, uri) -> - match Hashtbl.find Datapath_plugins.table scheme with + (fun (scheme, uri) -> + match Base.Hashtbl.find Datapath_plugins.table scheme with | Some (script_dir, _query_result) -> Some (script_dir, scheme, uri) | None -> @@ -767,8 +778,8 @@ let choose_datapath ?(persistent = true) domain response = possible else let supports_nonpersistent, others = - List.partition_tf - ~f:(fun (_script_dir, scheme, _uri) -> + List.partition + (fun (_script_dir, scheme, _uri) -> Datapath_plugins.supports_feature scheme _nonpersistent ) possible @@ -812,8 +823,8 @@ let bind ~volume_script_dir = * Volume.set and Volume.unset *) (* TODO handle this properly? 
*) let missing = - Option.bind !version ~f:(fun v -> - if String.(v = pvs_version) then Some (R.rpc_of_unit ()) else None + Option.bind !version (fun v -> + if String.equal v pvs_version then Some (R.rpc_of_unit ()) else None ) in return_volume_rpc (fun () -> @@ -822,8 +833,8 @@ let bind ~volume_script_dir = in let unset ~dbg ~sr ~vdi ~key = let missing = - Option.bind !version ~f:(fun v -> - if String.(v = pvs_version) then Some (R.rpc_of_unit ()) else None + Option.bind !version (fun v -> + if String.equal v pvs_version then Some (R.rpc_of_unit ()) else None ) in return_volume_rpc (fun () -> @@ -848,8 +859,7 @@ let bind ~volume_script_dir = stat ~dbg ~sr ~vdi >>= fun response -> (* If we have a clone-on-boot volume then use that instead *) ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> return (Ok response) @@ -875,7 +885,7 @@ let bind ~volume_script_dir = (* Convert between the xapi-storage interface and the SMAPI *) let features = List.map - ~f:(function "VDI_DESTROY" -> "VDI_DELETE" | x -> x) + (function "VDI_DESTROY" -> "VDI_DELETE" | x -> x) response.Xapi_storage.Plugin.features in (* Look for executable scripts and automatically add capabilities *) @@ -922,7 +932,7 @@ let bind ~volume_script_dir = (* If we have the ability to clone a disk then we can provide clone on boot. *) let features = - if List.mem features "VDI_CLONE" ~equal:String.equal then + if List.mem "VDI_CLONE" features then "VDI_RESET_ON_BOOT/2" :: features else features @@ -980,7 +990,7 @@ let bind ~volume_script_dir = let uid = Uri.path_unencoded uri in let uid = if String.length uid > 1 then - String.sub uid ~pos:1 ~len:(String.length uid - 1) + String.sub uid 1 (String.length uid - 1) else uid in @@ -1027,7 +1037,7 @@ let bind ~volume_script_dir = let uid = Uri.path_unencoded uri in let uid = if String.length uid > 1 then - String.sub uid ~pos:1 ~len:(String.length uid - 1) + String.sub uid 1 (String.length uid - 1) else uid in @@ -1061,10 +1071,10 @@ let bind ~volume_script_dir = |> Jsonrpc.to_string in response - |> List.map ~f:(fun probe_result -> + |> List.map (fun probe_result -> let uuid = - List.Assoc.find probe_result.Xapi_storage.Control.configuration - ~equal:String.equal "sr_uuid" + List.assoc_opt "sr_uuid" + probe_result.Xapi_storage.Control.configuration in let open Deferred.Or_error in let smapiv2_probe ?sr_info () = @@ -1118,7 +1128,8 @@ let bind ~volume_script_dir = ) |> Deferred.Or_error.combine_errors |> Deferred.Result.map_error ~f:(fun err -> - backend_error "SCRIPT_FAILED" ["SR.probe"; Error.to_string_hum err] + backend_error "SCRIPT_FAILED" + ["SR.probe"; Base.Error.to_string_hum err] ) >>>= fun results -> Deferred.Result.return (Storage_interface.Probe results) @@ -1184,25 +1195,27 @@ let bind ~volume_script_dir = let response = Array.to_list response in (* Filter out volumes which are clone-on-boot transients *) let transients = - List.fold - ~f:(fun set x -> + List.fold_left + (fun set x -> match - List.Assoc.find x.Xapi_storage.Control.keys - _clone_on_boot_key ~equal:String.equal + List.assoc_opt _clone_on_boot_key x.Xapi_storage.Control.keys with | None -> set | Some transient -> - Set.add set transient + Base.Set.add set transient ) - ~init:Core.String.Set.empty response + (Base.Set.empty (module Base.String)) + response in let response = List.filter - ~f:(fun x -> not (Set.mem transients x.Xapi_storage.Control.key)) + (fun x -> + not 
(Base.Set.mem transients x.Xapi_storage.Control.key) + ) response in - Deferred.Result.return (List.map ~f:vdi_of_volume response) + Deferred.Result.return (List.map vdi_of_volume response) ) |> wrap in @@ -1243,25 +1256,26 @@ let bind ~volume_script_dir = let response = Array.to_list response in (* Filter out volumes which are clone-on-boot transients *) let transients = - List.fold - ~f:(fun set x -> + List.fold_left + (fun set x -> match - List.Assoc.find x.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + Base.List.Assoc.find x.Xapi_storage.Control.keys + _clone_on_boot_key ~equal:String.equal with | None -> set | Some transient -> - Set.add set transient + Base.Set.add set transient ) - ~init:Core.String.Set.empty response + (Base.Set.empty (module Base.String)) + response in let response = List.filter - ~f:(fun x -> not (Set.mem transients x.Xapi_storage.Control.key)) + (fun x -> not (Base.Set.mem transients x.Xapi_storage.Control.key)) response in - Deferred.Result.return (List.map ~f:vdi_of_volume response, sr_info) + Deferred.Result.return (List.map vdi_of_volume response, sr_info) in let rec stat_with_retry ?(times = 3) sr = get_sr_info sr >>>= fun sr_info -> @@ -1272,7 +1286,7 @@ let bind ~volume_script_dir = | Unreachable when times > 0 -> debug "%s: sr %s is unreachable, remaining %d retries" __FUNCTION__ sr_uuid times ; - Clock.after Time.Span.second >>= fun () -> + Clock.after Core.Time.Span.second >>= fun () -> stat_with_retry ~times:(times - 1) sr | health -> debug "%s: sr unhealthy because it is %s" __FUNCTION__ @@ -1306,8 +1320,7 @@ let bind ~volume_script_dir = stat ~dbg ~sr ~vdi >>>= fun response -> (* Destroy any clone-on-boot volume that might exist *) ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> return (Ok ()) @@ -1435,7 +1448,7 @@ let bind ~volume_script_dir = Deferred.Result.return { Storage_interface.implementations= - List.map ~f:convert_implementation + List.map convert_implementation response.Xapi_storage.Data.implementations } ) @@ -1450,8 +1463,7 @@ let bind ~volume_script_dir = stat ~dbg ~sr ~vdi >>>= fun response -> (* If we have a clone-on-boot volume then use that instead *) ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> return (Ok response) @@ -1485,8 +1497,7 @@ let bind ~volume_script_dir = (* Discover the URIs using Volume.stat *) stat ~dbg ~sr ~vdi >>>= fun response -> ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> return (Ok response) @@ -1509,8 +1520,7 @@ let bind ~volume_script_dir = (* Discover the URIs using Volume.stat *) stat ~dbg ~sr ~vdi >>>= fun response -> ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> return (Ok response) @@ -1573,8 +1583,7 @@ let bind ~volume_script_dir = (* We create a non-persistent disk here with Volume.clone, and store the name of the cloned disk in the metadata of the original. 
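(As an aside, a minimal sketch of the stdlib/Base idioms this patch swaps in for the Core-style calls in the SR.ls/SR.scan hunks above: List.assoc_opt instead of List.Assoc.find ~equal, List.fold_left instead of List.fold ~init ~f, and a Base set built from an explicit comparator module. The volume record and the "clone-on-boot" key below are hypothetical stand-ins, not the patch's own types.)

```
type volume = {key: string; keys: (string * string) list}

(* Collect the values stored under the hypothetical "clone-on-boot" key,
   using stdlib List.fold_left/List.assoc_opt and a Base set created from
   an explicit comparator module. *)
let transient_keys volumes =
  List.fold_left
    (fun set v ->
      match List.assoc_opt "clone-on-boot" v.keys with
      | None -> set
      | Some transient -> Base.Set.add set transient
    )
    (Base.Set.empty (module Base.String))
    volumes

(* Drop the volumes whose key was recorded as a transient. *)
let non_transient volumes =
  let transients = transient_keys volumes in
  List.filter (fun v -> not (Base.Set.mem transients v.key)) volumes
```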
*) ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> Deferred.Result.return () @@ -1603,8 +1612,7 @@ let bind ~volume_script_dir = return_data_rpc (fun () -> Datapath_client.close (rpc ~dbg) dbg uri) else match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> Deferred.Result.return () @@ -1628,8 +1636,7 @@ let bind ~volume_script_dir = (* Discover the URIs using Volume.stat *) stat ~dbg ~sr ~vdi >>>= fun response -> ( match - List.Assoc.find response.Xapi_storage.Control.keys _clone_on_boot_key - ~equal:String.equal + List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> return (Ok response) @@ -1688,7 +1695,7 @@ let bind ~volume_script_dir = ) in let proj_bitmap r = r.Xapi_storage.Control.bitmap in - return (Result.map ~f:proj_bitmap result) + return (Result.map proj_bitmap result) in S.VDI.list_changed_blocks vdi_list_changed_blocks_impl ; let vdi_data_destroy_impl dbg sr vdi = @@ -1747,7 +1754,7 @@ let process_smapiv2_requests server txt = Deferred.return (Jsonrpc.string_of_response response) (** Active servers, one per sub-directory of the volume_root_dir *) -let servers = String.Table.create () ~size:4 +let servers = Base.Hashtbl.create ~size:4 (module Base.String) (* XXX: need a better error-handling strategy *) let get_ok = function @@ -1765,7 +1772,7 @@ let rec diff a b = | [] -> [] | a :: aa -> - if List.mem b a ~equal:String.( = ) then diff aa b else a :: diff aa b + if List.mem a b then diff aa b else a :: diff aa b (* default false due to bugs in SMAPIv3 plugins, once they are fixed this should be set to true *) @@ -1773,7 +1780,7 @@ let concurrent = ref false let watch_volume_plugins ~volume_root ~switch_path ~pipe = let create volume_plugin_name = - if Hashtbl.mem servers volume_plugin_name then + if Base.Hashtbl.mem servers volume_plugin_name then return () else ( info "Adding %s" volume_plugin_name ; @@ -1787,16 +1794,16 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe = () >>= fun result -> let server = get_ok result in - Hashtbl.add_exn servers ~key:volume_plugin_name ~data:server ; + Base.Hashtbl.add_exn servers ~key:volume_plugin_name ~data:server ; return () ) in let destroy volume_plugin_name = info "Removing %s" volume_plugin_name ; - match Hashtbl.find servers volume_plugin_name with + match Base.Hashtbl.find servers volume_plugin_name with | Some t -> Message_switch_async.Protocol_async.Server.shutdown ~t () >>= fun () -> - Hashtbl.remove servers volume_plugin_name ; + Base.Hashtbl.remove servers volume_plugin_name ; return () | None -> return () @@ -1804,10 +1811,9 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe = let sync () = Sys.readdir volume_root >>= fun names -> let needed : string list = Array.to_list names in - let got_already : string list = Hashtbl.keys servers in - Deferred.all_unit (List.map ~f:create (diff needed got_already)) - >>= fun () -> - Deferred.all_unit (List.map ~f:destroy (diff got_already needed)) + let got_already : string list = Base.Hashtbl.keys servers in + Deferred.all_unit (List.map create (diff needed got_already)) >>= fun () -> + Deferred.all_unit (List.map destroy (diff got_already needed)) in sync () >>= fun () -> let open Async_inotify.Event in @@ -1836,15 +1842,15 @@ let watch_datapath_plugins ~datapath_root 
~pipe = let sync () = Sys.readdir datapath_root >>= fun names -> let needed : string list = Array.to_list names in - let got_already : string list = Hashtbl.keys servers in + let got_already : string list = Base.Hashtbl.keys servers in Deferred.all_unit (List.map - ~f:(Datapath_plugins.register ~datapath_root) + (Datapath_plugins.register ~datapath_root) (diff needed got_already) ) >>= fun () -> Deferred.all_unit - (List.map ~f:Datapath_plugins.unregister (diff got_already needed)) + (List.map Datapath_plugins.unregister (diff got_already needed)) in sync () >>= fun () -> let open Async_inotify.Event in @@ -1921,8 +1927,7 @@ let self_test_plugin ~root_dir plugin = Test.VDI.destroy rpc dbg sr vdi_info.vdi >>= fun () -> Test.SR.stat rpc dbg sr >>= fun _sr_info -> Test.SR.scan rpc dbg sr >>= fun _sr_list -> - if List.mem query_result.features "SR_PROBE" ~equal:String.equal - then + if List.mem "SR_PROBE" query_result.features then Test.SR.probe rpc dbg plugin device_config [] >>= fun _result -> return () else @@ -1967,15 +1972,15 @@ let main ~root_dir ~state_path ~switch_path = info "main thread shutdown cleanly" ; return () | Error x -> - error "main thread failed with %s" (Exn.to_string x) ; - Clock.after (Time.Span.of_sec 5.) >>= fun () -> loop () + error "main thread failed with %s" (Base.Exn.to_string x) ; + Clock.after (Core.Time.Span.of_sec 5.) >>= fun () -> loop () in loop () open Xcp_service let description = - String.concat ~sep:" " + String.concat " " [ "Allow xapi storage adapters to be written as individual scripts." ; "To add a storage adapter, create a sub-directory in the --root directory" @@ -2060,10 +2065,10 @@ let _ = info "main thread shutdown cleanly" ; return () | Error x -> - error "main thread failed with %s" (Exn.to_string x) ; - Clock.after (Time.Span.of_sec 5.) >>= fun () -> loop () + error "main thread failed with %s" (Base.Exn.to_string x) ; + Clock.after (Core.Time.Span.of_sec 5.) 
>>= fun () -> loop () in loop () in ignore (run ()) ; - never_returns (Scheduler.go ()) + Core.never_returns (Scheduler.go ()) From d09f53de9093345cb761bd25fe9eb59079d76091 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 14 Jul 2023 17:19:26 +0100 Subject: [PATCH 068/141] IH-397: Do not open Async Replace it with Async_kernel wherever possible Useful to get familiar with the code and delimit which are its users Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/main.ml | 99 +++++++++++++++++-------------- 1 file changed, 54 insertions(+), 45 deletions(-) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 6f4309c5930..21f3ee03a08 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -13,14 +13,21 @@ *) module U = Unix module R = Rpc -open Async module Types = Xapi_storage_script_types module Plugin_client = Xapi_storage.Plugin.Plugin (Rpc_async.GenClient ()) module Volume_client = Xapi_storage.Control.Volume (Rpc_async.GenClient ()) module Sr_client = Xapi_storage.Control.Sr (Rpc_async.GenClient ()) module Datapath_client = Xapi_storage.Data.Datapath (Rpc_async.GenClient ()) -let ( >>>= ) = Deferred.Result.( >>= ) +let ( >>= ) = Async_kernel.( >>= ) + +let ( >>| ) = Async_kernel.( >>| ) + +let ( >>>= ) = Async_kernel.Deferred.Result.( >>= ) + +let return = Async_kernel.return + +module Deferred = Async_kernel.Deferred type config = {mutable use_observer: bool} @@ -58,7 +65,7 @@ let missing_uri () = let return_rpc typ result = (* Operator to unwrap the wrapped async return type of ocaml-rpc's Rpc_async *) let ( >*= ) a b = a |> Rpc_async.T.get >>= b in - Monitor.try_with ~extract_exn:true (fun () -> + Async_kernel.Monitor.try_with ~extract_exn:true (fun () -> (* We need to delay the evaluation of [result] until now, because when fork_exec_rpc is called by GenClient.declare, it might immediately raise a Fork_exec_error *) @@ -99,6 +106,7 @@ let use_syslog = ref false let log level fmt = Printf.ksprintf (fun s -> + let module Writer = Async.Writer in if !use_syslog then (* FIXME: this is synchronous and will block other I/O. 
* This should use Log_extended.Syslog, but that brings in Core's Syslog module @@ -306,11 +314,11 @@ let _is_a_snapshot_key = "is_a_snapshot" let _snapshot_of_key = "snapshot_of" let is_executable path = - Sys.is_file ~follow_symlinks:true path >>= function + Async.Sys.is_file ~follow_symlinks:true path >>= function | `No | `Unknown -> return (Error (`missing path)) | `Yes -> ( - Unix.access path [`Exec] >>= function + Async.Unix.access path [`Exec] >>= function | Error exn -> return (Error (`not_executable (path, exn))) | Ok () -> @@ -323,7 +331,7 @@ module Script = struct let name_mapping = Base.Hashtbl.create ~size:4 (module Base.String) let update_mapping ~script_dir = - Sys.readdir script_dir >>| Array.to_list >>| fun files -> + Async.Sys.readdir script_dir >>| Array.to_list >>| fun files -> (* If there are multiple files which map to the same lowercase string, we just take the first one, instead of failing *) let mapping = @@ -367,10 +375,10 @@ let observer_is_component_enabled () = let ( let* ) = ( >>= ) in let is_enabled () = let is_config_file path = Filename.check_suffix path ".observer.conf" in - let* files = Sys.readdir observer_config_dir in + let* files = Async.Sys.readdir observer_config_dir in return (Array.exists is_config_file files) in - let* result = Monitor.try_with ~extract_exn:true is_enabled in + let* result = Async.Monitor.try_with ~extract_exn:true is_enabled in return (Option.value (Result.to_option result) ~default:false) (** Call the script named after the RPC method in the [script_dir] @@ -413,7 +421,7 @@ let fork_exec_rpc : | _ -> (script_name, args, []) in - Process.create ~env:(`Extend env) ~prog:script_name ~args () >>= function + Async.Process.create ~env:(`Extend env) ~prog:script_name ~args () >>= function | Error e -> error "%s failed: %s" script_name (Base.Error.to_string_hum e) ; return @@ -424,7 +432,7 @@ let fork_exec_rpc : ) | Ok p -> ( (* Send the request as json on stdin *) - let w = Process.stdin p in + let w = Async.Process.stdin p in (* We pass just the args, not the complete JSON-RPC call. Currently the Python code generated by rpclib requires all params to be named - they will be converted into a name->value Python dict. 
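(A minimal sketch, not part of the patch, of the convention this commit adopts: the bind operators come from Async_kernel, while the Unix-dependent modules are qualified as Async.* at each call site rather than pulled in with a global open Async. The file_line_count function and its use of Async.Reader.file_contents are illustrative only.)

```
let ( >>= ) = Async_kernel.( >>= )

(* Reader does Unix I/O, so it is qualified from Async; the monad and
   return come from Async_kernel. *)
let file_line_count path : int Async_kernel.Deferred.t =
  Async.Reader.file_contents path >>= fun contents ->
  Async_kernel.return (List.length (String.split_on_char '\n' contents))
```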
@@ -447,22 +455,22 @@ let fork_exec_rpc : ) >>>= fun args -> let args = compat_in args in - Writer.write w (Jsonrpc.to_string args) ; - Writer.close w >>= fun () -> - Process.collect_output_and_wait p >>= fun output -> - match output.Process.Output.exit_status with + Async.Writer.write w (Jsonrpc.to_string args) ; + Async.Writer.close w >>= fun () -> + Async.Process.collect_output_and_wait p >>= fun output -> + match output.Async.Process.Output.exit_status with | Error (`Exit_non_zero code) -> ( (* Expect an exception and backtrace on stdout *) match Base.Or_error.try_with (fun () -> - Jsonrpc.of_string output.Process.Output.stdout + Jsonrpc.of_string output.Async.Process.Output.stdout ) with | Error _ -> error "%s failed and printed bad error json: %s" script_name - output.Process.Output.stdout ; + output.Async.Process.Output.stdout ; error "%s failed, stderr: %s" script_name - output.Process.Output.stderr ; + output.Async.Process.Output.stderr ; return (Error (backend_error "SCRIPT_FAILED" @@ -470,8 +478,8 @@ let fork_exec_rpc : script_name ; "non-zero exit and bad json on stdout" ; string_of_int code - ; output.Process.Output.stdout - ; output.Process.Output.stdout + ; output.Async.Process.Output.stdout + ; output.Async.Process.Output.stdout ] ) ) @@ -481,9 +489,9 @@ let fork_exec_rpc : with | Error _ -> error "%s failed and printed bad error json: %s" script_name - output.Process.Output.stdout ; + output.Async.Process.Output.stdout ; error "%s failed, stderr: %s" script_name - output.Process.Output.stderr ; + output.Async.Process.Output.stderr ; return (Error (backend_error "SCRIPT_FAILED" @@ -491,8 +499,8 @@ let fork_exec_rpc : script_name ; "non-zero exit and bad json on stdout" ; string_of_int code - ; output.Process.Output.stdout - ; output.Process.Output.stdout + ; output.Async.Process.Output.stdout + ; output.Async.Process.Output.stdout ] ) ) @@ -509,9 +517,9 @@ let fork_exec_rpc : [ script_name ; "signalled" - ; Signal.to_string signal - ; output.Process.Output.stdout - ; output.Process.Output.stdout + ; Async.Signal.to_string signal + ; output.Async.Process.Output.stdout + ; output.Async.Process.Output.stdout ] ) ) @@ -520,24 +528,25 @@ let fork_exec_rpc : value from the scripts, not a complete JSON-RPC response *) match Base.Or_error.try_with (fun () -> - Jsonrpc.of_string output.Process.Output.stdout + Jsonrpc.of_string output.Async.Process.Output.stdout ) with | Error _ -> error "%s succeeded but printed bad json: %s" script_name - output.Process.Output.stdout ; + output.Async.Process.Output.stdout ; return (Error (backend_error "SCRIPT_FAILED" [ script_name ; "bad json on stdout" - ; output.Process.Output.stdout + ; output.Async.Process.Output.stdout ] ) ) | Ok response -> - info "%s succeeded: %s" script_name output.Process.Output.stdout ; + info "%s succeeded: %s" script_name + output.Async.Process.Output.stdout ; let response = compat_out response in let response = R.success response in return (Ok response) @@ -665,7 +674,7 @@ module Attached_SRs = struct | `No | `Unknown -> return () | `Yes -> - Reader.file_contents path >>= fun contents -> + Async.Reader.file_contents path >>= fun contents -> sr_table := contents |> Sexplib.Sexp.of_string @@ -1286,7 +1295,7 @@ let bind ~volume_script_dir = | Unreachable when times > 0 -> debug "%s: sr %s is unreachable, remaining %d retries" __FUNCTION__ sr_uuid times ; - Clock.after Core.Time.Span.second >>= fun () -> + Async.Clock.after Core.Time.Span.second >>= fun () -> stat_with_retry ~times:(times - 1) sr | health -> debug "%s: 
sr unhealthy because it is %s" __FUNCTION__ @@ -1809,7 +1818,7 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe = return () in let sync () = - Sys.readdir volume_root >>= fun names -> + Async.Sys.readdir volume_root >>= fun names -> let needed : string list = Array.to_list names in let got_already : string list = Base.Hashtbl.keys servers in Deferred.all_unit (List.map create (diff needed got_already)) >>= fun () -> @@ -1818,10 +1827,10 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe = sync () >>= fun () -> let open Async_inotify.Event in let rec loop () = - (Pipe.read pipe >>= function + (Async.Pipe.read pipe >>= function | `Eof -> info "Received EOF from inotify event pipe" ; - Shutdown.exit 1 + Async.Shutdown.exit 1 | `Ok (Created path) | `Ok (Moved (Into path)) -> create (Filename.basename path) | `Ok (Unlinked path) | `Ok (Moved (Away path)) -> @@ -1840,7 +1849,7 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe = let watch_datapath_plugins ~datapath_root ~pipe = let sync () = - Sys.readdir datapath_root >>= fun names -> + Async.Sys.readdir datapath_root >>= fun names -> let needed : string list = Array.to_list names in let got_already : string list = Base.Hashtbl.keys servers in Deferred.all_unit @@ -1855,10 +1864,10 @@ let watch_datapath_plugins ~datapath_root ~pipe = sync () >>= fun () -> let open Async_inotify.Event in let rec loop () = - (Pipe.read pipe >>= function + (Async.Pipe.read pipe >>= function | `Eof -> info "Received EOF from inotify event pipe" ; - Shutdown.exit 1 + Async.Shutdown.exit 1 | `Ok (Created path) | `Ok (Moved (Into path)) -> Datapath_plugins.register ~datapath_root (Filename.basename path) | `Ok (Unlinked path) | `Ok (Moved (Away path)) -> @@ -1885,7 +1894,7 @@ let self_test_plugin ~root_dir plugin = in let module Test = Storage_interface.StorageAPI (Rpc_async.GenClient ()) in let dbg = "debug" in - Monitor.try_with (fun () -> + Async.Monitor.try_with (fun () -> let open Rpc_async.ErrM in Test.Query.query rpc dbg >>= (fun query_result -> @@ -1937,7 +1946,7 @@ let self_test_plugin ~root_dir plugin = ) >>= function | Ok x -> - Async.Deferred.return x + Async_kernel.Deferred.return x | Error _y -> failwith "self test failed" @@ -1960,7 +1969,7 @@ let main ~root_dir ~state_path ~switch_path = Async_inotify.create ~recursive:false ~watch_new_dirs:false volume_root >>= fun (_, _, volume) -> let rec loop () = - Monitor.try_with (fun () -> + Async.Monitor.try_with (fun () -> Deferred.all_unit [ watch_volume_plugins ~volume_root ~switch_path ~pipe:volume @@ -1973,7 +1982,7 @@ let main ~root_dir ~state_path ~switch_path = return () | Error x -> error "main thread failed with %s" (Base.Exn.to_string x) ; - Clock.after (Core.Time.Span.of_sec 5.) >>= fun () -> loop () + Async.Clock.after (Core.Time.Span.of_sec 5.) >>= fun () -> loop () in loop () @@ -2053,7 +2062,7 @@ let _ = let* observer_enabled = observer_is_component_enabled () in config.use_observer <- observer_enabled ; let rec loop () = - Monitor.try_with (fun () -> + Async_kernel.Monitor.try_with (fun () -> if !self_test_only then self_test ~root_dir:!root_dir else @@ -2066,9 +2075,9 @@ let _ = return () | Error x -> error "main thread failed with %s" (Base.Exn.to_string x) ; - Clock.after (Core.Time.Span.of_sec 5.) >>= fun () -> loop () + Async.Clock.after (Core.Time.Span.of_sec 5.) 
>>= fun () -> loop () in loop () in ignore (run ()) ; - Core.never_returns (Scheduler.go ()) + Core.never_returns (Async.Scheduler.go ()) From e21f4d050feb8c26e63a40c1ea0f697292b364d3 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 18 Jul 2023 15:02:32 +0100 Subject: [PATCH 069/141] IH-397: Replace async with lwt in xapi-storage-script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The error handling used in async is translated into Lwt_result type, this is because the base Lwt only allows exception, which isn't enough to code the errors produced. Logging has been changed to use logs with an asynchronous reporter. Now loglines print the loglevel. The setup of the loops / promises has been changed, the outer loop has been removed and its retry-on-error logic has been moved to the two inner watch loops. The inotify loop needed quite a few changes as async_inotify was doing complex handling of events, even consolidating them. This has been simplified by removing the watches on files and maintaining the directory one, and creating a list of commands, which the watch loops can act upon, like registering and deregistering plugins, for example. The file descriptor handling for communicating with launched processes needed workaround to be able to close stdin before the other channel without triggering an error at the end when all channels are closed. Error reporting was added to the smapiv2 rpc loop and will make the errors visible to xapi instead of failing silently. Co-developed-by: Edwin Török Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/dune | 18 +- ocaml/xapi-storage-script/main.ml | 1190 ++++++++++++++++++----------- 2 files changed, 734 insertions(+), 474 deletions(-) diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index a3b86f166b4..50af6ffcb18 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -1,23 +1,23 @@ (executable (name main) (libraries - async - async_inotify - async_kernel - async_unix base - base.caml core - core_unix - core_unix.time_unix - message-switch-async + fmt + inotify + inotify.lwt + logs + logs.lwt + lwt + lwt.unix + message-switch-lwt message-switch-unix ppx_deriving.runtime result rpclib.core rpclib.json - rpclib-async + rpclib-lwt sexplib sexplib0 uri diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 21f3ee03a08..c8790cbb546 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -11,23 +11,287 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -module U = Unix module R = Rpc module Types = Xapi_storage_script_types -module Plugin_client = Xapi_storage.Plugin.Plugin (Rpc_async.GenClient ()) -module Volume_client = Xapi_storage.Control.Volume (Rpc_async.GenClient ()) -module Sr_client = Xapi_storage.Control.Sr (Rpc_async.GenClient ()) -module Datapath_client = Xapi_storage.Data.Datapath (Rpc_async.GenClient ()) +module Plugin_client = Xapi_storage.Plugin.Plugin (Rpc_lwt.GenClient ()) +module Volume_client = Xapi_storage.Control.Volume (Rpc_lwt.GenClient ()) +module Sr_client = Xapi_storage.Control.Sr (Rpc_lwt.GenClient ()) +module Datapath_client = Xapi_storage.Data.Datapath (Rpc_lwt.GenClient ()) +module StringMap = Map.Make (String) -let ( >>= ) = Async_kernel.( >>= ) +let ( >>= ) = Lwt.bind -let ( >>| ) = Async_kernel.( >>| ) +let ( let* ) = Lwt.bind -let ( >>>= ) = Async_kernel.Deferred.Result.( >>= ) +let ( >>| ) = Fun.flip Lwt.map -let return = Async_kernel.return +let ( >>>= ) = Lwt_result.bind -module Deferred = Async_kernel.Deferred +let return = Lwt_result.return + +let fail = Lwt_result.fail + +let ( // ) = Filename.concat + +module Deferred = struct + let errorf fmt = + Printf.ksprintf (fun m -> Lwt.return (Base.Or_error.error_string m)) fmt + + let combine_errors lst = Lwt.all lst >>| Base.Or_error.combine_errors + + let try_with f = Lwt.try_bind f return fail +end + +module Sys = struct + type file = Regular | Directory | Other | Missing | Unknown + + let file_kind ~follow_symlinks path = + Lwt.try_bind + (fun () -> + ( if follow_symlinks then + Lwt_unix.LargeFile.stat + else + Lwt_unix.LargeFile.lstat + ) + path + ) + (function + | s -> ( + match s.Unix.LargeFile.st_kind with + | Unix.S_REG -> + Lwt.return Regular + | Unix.S_DIR -> + Lwt.return Directory + | _ -> + Lwt.return Other + ) + ) + (function + | Unix.Unix_error (Unix.ENOENT, _, _) -> + Lwt.return Missing + | Unix.Unix_error ((Unix.EACCES | Unix.ELOOP), _, _) -> + Lwt.return Unknown + | e -> + Lwt.fail e + ) + + let access path modes = + Lwt.try_bind + (fun () -> Lwt_unix.access path modes) + return + (fun exn -> fail (`not_executable (path, exn))) + + let assert_is_executable path = + file_kind ~follow_symlinks:true path >>= function + | Directory | Other | Missing | Unknown -> + fail (`missing path) + | Regular -> ( + access path [Unix.X_OK] >>= function + | Error exn -> + fail exn + | Ok () -> + return () + ) + + let read_file_contents path = + Lwt_io.(with_file ~mode:input ~flags:[O_RDONLY] ~perm:0o000 path read) + + let save ~contents path = + Lwt_io.(with_file ~mode:output path (Fun.flip write contents)) + + let readdir path = + path |> Lwt_unix.files_of_directory |> Lwt_stream.to_list >>= fun listing -> + List.filter (function "." | ".." 
-> false | _ -> true) listing + |> Lwt.return + + let rec mkdir_p ?(perm = 0o755) path = + file_kind ~follow_symlinks:false path >>= function + | Directory -> + Lwt.return_unit + | Regular | Other | Unknown -> + let msg = + Printf.sprintf + {|Could not create directory "%s": already exists and it's not a directory|} + path + in + Lwt.fail (Failure msg) + | Missing -> + let parent = Filename.dirname path in + mkdir_p ~perm parent >>= fun () -> Lwt_unix.mkdir path perm +end + +module Signal = struct + type t = int + + let to_string s = Fmt.(str "%a" Dump.signal s) +end + +module Process : sig + module Output : sig + type exit_or_signal = Exit_non_zero of int | Signal of Signal.t + + type t = { + exit_status: (unit, exit_or_signal) Result.t + ; stdout: string + ; stderr: string + } + end + + val run : + env:(string * string) list + -> prog:string + -> args:string list + -> input:string + -> Output.t Lwt.t + (** Runs a cli program prepeding [env] to its environment, writes [input] + into its stdin, then closing the fd, and finally waits for the program to + finish and returns the exit status, its stdout and stderr. *) +end = struct + module Output = struct + type exit_or_signal = Exit_non_zero of int | Signal of Signal.t + + type t = { + exit_status: (unit, exit_or_signal) Result.t + ; stdout: string + ; stderr: string + } + + let exit_or_signal_of_unix = function + | Unix.WEXITED 0 -> + Ok () + | WEXITED n -> + Error (Exit_non_zero n) + | WSIGNALED n -> + Error (Signal n) + | WSTOPPED n -> + Error (Signal n) + end + + let create ~env ~prog ~args = + let args = Array.of_list (prog :: args) in + let cmd = (prog, args) in + + let env = + Unix.environment () + |> Array.to_seq + |> Seq.map (fun kv -> + let k, v = Scanf.sscanf kv "%s@=%s" (fun k v -> (k, v)) in + (k, v) + ) + |> StringMap.of_seq + |> StringMap.add_seq (List.to_seq env) + |> StringMap.to_seq + |> Seq.map (fun (k, v) -> Printf.sprintf "%s=%s" k v) + |> Array.of_seq + in + + Lwt_process.open_process_full ~env cmd + + let close chan () = Lwt_io.close chan + + let send chan data = + Lwt.finalize (fun () -> Lwt_io.write chan data) (close chan) + + let receive chan = Lwt.finalize (fun () -> Lwt_io.read chan) (close chan) + + let run ~env ~prog ~args ~input = + let p = create ~env ~prog ~args in + let sender = send p#stdin input in + let receiver_out = receive p#stdout in + let receiver_err = receive p#stderr in + Lwt.catch + (fun () -> + let receiver = Lwt.both receiver_out receiver_err in + Lwt.both sender receiver >>= fun ((), (stdout, stderr)) -> + p#status >>= fun status -> + let exit_status = Output.exit_or_signal_of_unix status in + Lwt.return {Output.exit_status; stdout; stderr} + ) + (function + | Lwt.Canceled as exn -> + Lwt.cancel receiver_out ; Lwt.cancel receiver_err ; Lwt.fail exn + | exn -> + Lwt.fail exn + ) +end + +module FileWatcher = struct + type move = Away of string | Into of string + + type event = + | Created of string + | Unlinked of string + | Modified of string + | Moved of move + | Queue_overflow (** Consumer is not reading fast enough, events missed *) + + let create path = + Lwt_inotify.create () >>= fun desc -> + let watches = Hashtbl.create 32 in + let selectors = + Inotify.[S_Close; S_Create; S_Delete; S_Delete_self; S_Modify; S_Move] + in + Lwt_inotify.add_watch desc path selectors >>= fun watch -> + (* Deduplicate the watches by removing the previous one from inotify and + replacing it in the table *) + let maybe_remove = + if Hashtbl.mem watches watch then + Lwt_inotify.rm_watch desc watch + else + 
Lwt.return_unit + in + maybe_remove >>= fun () -> + Hashtbl.replace watches watch path ; + Lwt.return (watches, desc) + + let rec read (watches, desc) = + Lwt_inotify.read desc >>= fun (wd, mask, _cookie, filename) -> + let overflowed = + Inotify.int_of_watch wd = -1 && mask = [Inotify.Q_overflow] + in + let watch_path = Hashtbl.find_opt watches wd in + match (overflowed, watch_path) with + | true, _ -> + Lwt.return [Queue_overflow] + | _, None -> + Lwt.return [] + | _, Some base_path -> + let path = + match filename with + | None -> + base_path + | Some name -> + base_path // name + in + + List.filter_map + (function + | Inotify.Access + | Attrib + | Isdir + | Open + | Close_nowrite + | Ignored + | Unmount -> + None + | Create -> + Some (Created path) + | Delete | Delete_self -> + Some (Unlinked path) + | Close_write | Modify | Move_self -> + Some (Modified path) + | Moved_from -> + Some (Moved (Away path)) + | Moved_to -> + Some (Moved (Into path)) + | Q_overflow -> + Some Queue_overflow + ) + mask + |> Lwt.return +end + +module Clock = struct let after ~seconds = Lwt_unix.sleep seconds end type config = {mutable use_observer: bool} @@ -63,36 +327,28 @@ let missing_uri () = (* fork_exec_rpc either raises a Fork_exec_error exception or returns a successful RPC response *) let return_rpc typ result = - (* Operator to unwrap the wrapped async return type of ocaml-rpc's Rpc_async *) - let ( >*= ) a b = a |> Rpc_async.T.get >>= b in - Async_kernel.Monitor.try_with ~extract_exn:true (fun () -> + Lwt.catch + (fun () -> (* We need to delay the evaluation of [result] until now, because when fork_exec_rpc is called by GenClient.declare, it might immediately raise a Fork_exec_error *) - result () >*= fun result -> - (* In practice we'll always get a successful RPC response here (Ok), - but we still have to transform the Error to make the types match: *) - let result = - Base.Result.map_error result ~f:(fun err -> - backend_error "SCRIPT_RETURNED_RPC_ERROR" - [Rpcmarshal.marshal typ err |> R.to_string] - ) - in - return result - ) - >>= function - | Ok result -> - return result - | Error (Fork_exec_error err) -> - return (Error err) - (* We should not get any other exception from fork_exec_rpc: *) - | Error e -> - return - (Error - (backend_error "SCRIPT_FAILED" - ["Unexpected exception:" ^ Base.Exn.to_string e] - ) + Fun.flip Lwt.map + (Rpc_lwt.T.get (result ())) + (* In practice we'll always get a successful RPC response here (Ok), + but we still have to transform the Error to make the types match: *) + (Base.Result.map_error ~f:(fun err -> + backend_error "SCRIPT_RETURNED_RPC_ERROR" + [Rpcmarshal.marshal typ err |> R.to_string] + ) ) + ) + (function + | Fork_exec_error err -> + fail err + | e -> + let msg = ["Unexpected exception:" ^ Base.Exn.to_string e] in + fail (backend_error "SCRIPT_FAILED" msg) + ) let return_volume_rpc result = return_rpc Xapi_storage.Control.typ_of_exns result @@ -101,30 +357,61 @@ let return_plugin_rpc result = return_rpc Xapi_storage.Common.typ_of_exnt result let return_data_rpc result = return_rpc Xapi_storage.Common.typ_of_exnt result -let use_syslog = ref false - -let log level fmt = - Printf.ksprintf - (fun s -> - let module Writer = Async.Writer in - if !use_syslog then - (* FIXME: this is synchronous and will block other I/O. 
- * This should use Log_extended.Syslog, but that brings in Core's Syslog module - * which conflicts with ours *) - Syslog.log Syslog.Daemon level s - else - let w = Lazy.force Writer.stderr in - Writer.write w s ; Writer.newline w +(* Reporter taken from + https://erratique.ch/software/logs/doc/Logs_lwt/index.html#report_ex + under ISC License *) +let lwt_reporter () = + let buf_fmt ~like = + let b = Buffer.create 512 in + ( Fmt.with_buffer ~like b + , fun () -> + let m = Buffer.contents b in + Buffer.reset b ; m ) - fmt + in + let app, app_flush = buf_fmt ~like:Fmt.stdout in + let dst, dst_flush = buf_fmt ~like:Fmt.stderr in + (* The default pretty-printer adds the binary name to the loglines, which + results in appearing twice per logline, override it instead *) + let pp_header = + let pf = Format.fprintf in + let pp_header ppf (l, h) = + if l = Logs.App then + match h with None -> () | Some h -> pf ppf "[%s] " h + else + match h with + | None -> + pf ppf "[%a] " Logs.pp_level l + | Some h -> + pf ppf "[%s] " h + in + pp_header + in + let reporter = Logs.format_reporter ~app ~dst ~pp_header () in + let report src level ~over k msgf = + let k () = + let write () = + match level with + | Logs.App -> + Lwt_io.write Lwt_io.stdout (app_flush ()) + | _ -> + Lwt_io.write Lwt_io.stderr (dst_flush ()) + in + let unblock () = over () |> Lwt.return in + Lwt.finalize write unblock |> Lwt.ignore_result ; + k () + in + reporter.Logs.report src level ~over:(fun () -> ()) k msgf + in + {Logs.report} -let debug fmt = log Syslog.Debug fmt +let debug = Logs_lwt.debug -let info fmt = log Syslog.Info fmt +let info = Logs_lwt.info -let warn fmt = log Syslog.Warning fmt +let warn = Logs_lwt.warn -let error fmt = log Syslog.Err fmt +let error = Logs_lwt.err let pvs_version = "3.0" @@ -166,13 +453,12 @@ end) : sig -> ( device_config * compat_in * compat_out , Storage_interface.Errors.error ) - Deferred.Result.t + Lwt_result.t (** Compatiblity for the old PVS version of SR.create, which had signature [uri -> name -> desc -> config -> unit] *) val sr_attach : - device_config - -> (compat_in, Storage_interface.Errors.error) Deferred.Result.t + device_config -> (compat_in, Storage_interface.Errors.error) Lwt_result.t (** Compatiblity for the old PVS version of SR.attach, which had signature [uri -> sr (=string)] *) end = struct @@ -230,12 +516,12 @@ end = struct | Some version when Base.String.(version = pvs_version) -> ( match Base.List.Assoc.find ~equal:String.equal device_config "uri" with | None -> - return (Error (missing_uri ())) + fail (missing_uri ()) | Some uri -> - return (Ok (add_param_to_input [("uri", R.String uri)])) + return (add_param_to_input [("uri", R.String uri)]) ) | _ -> - return (Ok id) + return id let sr_create device_config = compat_uri device_config >>>= fun compat_in -> @@ -253,30 +539,36 @@ end = struct | _ -> id in - return (Ok (device_config, compat_in, compat_out)) + return (device_config, compat_in, compat_out) let sr_attach = compat_uri end let check_plugin_version_compatible query_result = let Xapi_storage.Plugin.{name; required_api_version; _} = query_result in - if Base.String.(required_api_version <> api_max) then - warn - "Using deprecated SMAPIv3 API version %s, latest is %s. Update your %s \ - plugin!" - required_api_version api_max name ; + ( if Base.String.(required_api_version <> api_max) then + warn (fun m -> + m + "Using deprecated SMAPIv3 API version %s, latest is %s. Update \ + your %s plugin!" 
+ required_api_version api_max name + ) + else + Lwt.return_unit + ) + >>= fun () -> if List.mem required_api_version supported_api_versions then - Deferred.Result.return () + return () else let msg = Printf.sprintf "%s requires unknown SMAPI API version %s, supported: %s" name required_api_version (String.concat "," supported_api_versions) in - return (Error (Storage_interface.Errors.No_storage_plugin_for_sr msg)) + fail (Storage_interface.Errors.No_storage_plugin_for_sr msg) module RRD = struct - open Message_switch_async.Protocol_async + open Message_switch_lwt.Protocol_lwt let ( >>|= ) m f = m >>= function @@ -292,13 +584,13 @@ module RRD = struct let switch_rpc queue_name string_of_call response_of_string call = Client.connect ~switch:queue_name () >>|= fun t -> Client.rpc ~t ~queue:queue_name ~body:(string_of_call call) () >>|= fun s -> - return (response_of_string s) + Lwt.return (response_of_string s) let rpc = switch_rpc !Rrd_interface.queue_name Jsonrpc.string_of_call Jsonrpc.response_of_string - module Client = Rrd_interface.RPC_API (Rpc_async.GenClient ()) + module Client = Rrd_interface.RPC_API (Rpc_lwt.GenClient ()) end let _nonpersistent = "NONPERSISTENT" @@ -313,25 +605,13 @@ let _is_a_snapshot_key = "is_a_snapshot" let _snapshot_of_key = "snapshot_of" -let is_executable path = - Async.Sys.is_file ~follow_symlinks:true path >>= function - | `No | `Unknown -> - return (Error (`missing path)) - | `Yes -> ( - Async.Unix.access path [`Exec] >>= function - | Error exn -> - return (Error (`not_executable (path, exn))) - | Ok () -> - return (Ok ()) - ) - module Script = struct (** We cache (lowercase script name -> original script name) mapping for the scripts in the root directory of every registered plugin. *) let name_mapping = Base.Hashtbl.create ~size:4 (module Base.String) let update_mapping ~script_dir = - Async.Sys.readdir script_dir >>| Array.to_list >>| fun files -> + Sys.readdir script_dir >>= fun files -> (* If there are multiple files which map to the same lowercase string, we just take the first one, instead of failing *) let mapping = @@ -340,7 +620,7 @@ module Script = struct (module Base.String.Caseless) ~f:Base.String.min in - Base.Hashtbl.set name_mapping ~key:script_dir ~data:mapping + return @@ Base.Hashtbl.set name_mapping ~key:script_dir ~data:mapping let path ~script_dir ~script_name = let find () = @@ -350,14 +630,14 @@ module Script = struct Base.Map.find mapping script_name in let script_name = Option.value cached_script_name ~default:script_name in - let path = Filename.concat script_dir script_name in - is_executable path >>| function Ok () -> Ok path | Error _ as e -> e + let path = script_dir // script_name in + Sys.assert_is_executable path >>>= fun () -> return path in find () >>= function | Ok path -> - return (Ok path) + return path | Error _ -> - update_mapping ~script_dir >>= fun () -> find () + update_mapping ~script_dir >>>= fun () -> find () end let observer_config_dir = @@ -372,14 +652,13 @@ let observer_config_dir = would consist of querying the 'components' field of an observer from the xapi database. 
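(A small illustrative sketch, separate from the patch itself, of the Lwt_result style this rewrite uses in place of Async's error monad: Lwt_result.return and Lwt_result.fail build an ('a, 'e) result Lwt.t, and the >>>= operator, aliased to Lwt_result.bind as in the diff above, only runs the continuation when the previous step returned Ok. The parse_port/describe functions are made-up examples, not code from main.ml.)

```
let ( >>>= ) = Lwt_result.bind

(* Validation returns Ok or Error inside Lwt instead of raising. *)
let parse_port s : (int, string) result Lwt.t =
  match int_of_string_opt s with
  | Some p when p > 0 && p < 65536 ->
      Lwt_result.return p
  | _ ->
      Lwt_result.fail ("invalid port: " ^ s)

(* >>>= short-circuits on the first Error. *)
let describe s : (string, string) result Lwt.t =
  parse_port s >>>= fun port ->
  Lwt_result.return (Printf.sprintf "port %d is valid" port)
```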
*) let observer_is_component_enabled () = - let ( let* ) = ( >>= ) in let is_enabled () = let is_config_file path = Filename.check_suffix path ".observer.conf" in - let* files = Async.Sys.readdir observer_config_dir in - return (Array.exists is_config_file files) + let* files = Sys.readdir observer_config_dir in + Lwt.return (List.exists is_config_file files) in - let* result = Async.Monitor.try_with ~extract_exn:true is_enabled in - return (Option.value (Result.to_option result) ~default:false) + let* result = Deferred.try_with is_enabled in + Lwt.return (Option.value (Result.to_option result) ~default:false) (** Call the script named after the RPC method in the [script_dir] directory. The arguments (not the whole JSON-RPC call) are passed as JSON @@ -401,10 +680,10 @@ let fork_exec_rpc : -> ?compat_out:compat_out -> ?dbg:string -> R.call - -> R.response Deferred.t = + -> R.response Lwt.t = fun ~script_dir ?missing ?(compat_in = id) ?(compat_out = id) ?dbg -> let invoke_script call script_name : - (R.response, Storage_interface.Errors.error) Deferred.Result.t = + (R.response, Storage_interface.Errors.error) Lwt_result.t = let traceparent = Option.bind dbg Debug_info.traceparent_of_dbg in let args = ["--json"] in let script_name, args, env = @@ -421,171 +700,139 @@ let fork_exec_rpc : | _ -> (script_name, args, []) in - Async.Process.create ~env:(`Extend env) ~prog:script_name ~args () >>= function - | Error e -> - error "%s failed: %s" script_name (Base.Error.to_string_hum e) ; - return - (Error - (backend_error "SCRIPT_FAILED" - [script_name; Base.Error.to_string_hum e] - ) + (* We pass just the args, not the complete JSON-RPC call. + Currently the Python code generated by rpclib requires all params to + be named - they will be converted into a name->value Python dict. + Rpclib currently puts all named params into a dict, so we expect + params to be a single Dict, if all the params are named. *) + ( match call.R.params with + | [(R.Dict _ as d)] -> + return d + | _ -> + fail + (backend_error "INCORRECT_PARAMETERS" + [ + script_name + ; "All the call parameters should be named and should be in a RPC \ + Dict" + ] ) - | Ok p -> ( - (* Send the request as json on stdin *) - let w = Async.Process.stdin p in - (* We pass just the args, not the complete JSON-RPC call. - Currently the Python code generated by rpclib requires all params to - be named - they will be converted into a name->value Python dict. - Rpclib currently puts all named params into a dict, so we expect - params to be a single Dict, if all the params are named. 
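(For illustration only: the named-parameter convention described above means every call handed to fork_exec_rpc carries a single Rpc.Dict. A hypothetical call might look like the following; the method name and fields are invented rather than taken from the xapi-storage interface.)

```
(* A call whose parameters are all named, carried as one Rpc.Dict. *)
let example_call : Rpc.call =
  Rpc.call "Volume.stat"
    [Rpc.Dict [("dbg", Rpc.String "debug"); ("sr", Rpc.String "sr-uuid")]]
```

Serialised with Jsonrpc.to_string, such a Dict reaches the script's stdin as a plain JSON object, e.g. {"dbg": "debug", "sr": "sr-uuid"}.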
*) - ( match call.R.params with - | [(R.Dict _ as d)] -> - return (Ok d) - | _ -> - return - (Error - (backend_error "INCORRECT_PARAMETERS" - [ - script_name - ; "All the call parameters should be named and should be \ - in a RPC Dict" - ] - ) - ) + ) + >>>= fun input -> + let input = compat_in input |> Jsonrpc.to_string in + Process.run ~env ~prog:script_name ~args ~input >>= fun output -> + let fail_because ~cause description = + fail + (backend_error "SCRIPT_FAILED" + [ + script_name + ; description + ; cause + ; output.Process.Output.stdout + ; output.Process.Output.stdout + ] ) - >>>= fun args -> - let args = compat_in args in - Async.Writer.write w (Jsonrpc.to_string args) ; - Async.Writer.close w >>= fun () -> - Async.Process.collect_output_and_wait p >>= fun output -> - match output.Async.Process.Output.exit_status with - | Error (`Exit_non_zero code) -> ( - (* Expect an exception and backtrace on stdout *) - match - Base.Or_error.try_with (fun () -> - Jsonrpc.of_string output.Async.Process.Output.stdout - ) - with - | Error _ -> - error "%s failed and printed bad error json: %s" script_name - output.Async.Process.Output.stdout ; - error "%s failed, stderr: %s" script_name - output.Async.Process.Output.stderr ; - return - (Error - (backend_error "SCRIPT_FAILED" - [ - script_name - ; "non-zero exit and bad json on stdout" - ; string_of_int code - ; output.Async.Process.Output.stdout - ; output.Async.Process.Output.stdout - ] - ) - ) - | Ok response -> ( - match - Base.Or_error.try_with (fun () -> Types.error_of_rpc response) - with - | Error _ -> - error "%s failed and printed bad error json: %s" script_name - output.Async.Process.Output.stdout ; - error "%s failed, stderr: %s" script_name - output.Async.Process.Output.stderr ; - return - (Error - (backend_error "SCRIPT_FAILED" - [ - script_name - ; "non-zero exit and bad json on stdout" - ; string_of_int code - ; output.Async.Process.Output.stdout - ; output.Async.Process.Output.stdout - ] - ) - ) - | Ok x -> - return - (Error (backend_backtrace_error x.code x.params x.backtrace)) - ) + in + match output.Process.Output.exit_status with + | Error (Exit_non_zero code) -> ( + (* Expect an exception and backtrace on stdout *) + match + Base.Or_error.try_with (fun () -> + Jsonrpc.of_string output.Process.Output.stdout ) - | Error (`Signal signal) -> - error "%s caught a signal and failed" script_name ; - return - (Error - (backend_error "SCRIPT_FAILED" - [ - script_name - ; "signalled" - ; Async.Signal.to_string signal - ; output.Async.Process.Output.stdout - ; output.Async.Process.Output.stdout - ] - ) - ) - | Ok () -> ( - (* Parse the json on stdout. 
We get back a JSON-RPC - value from the scripts, not a complete JSON-RPC response *) - match - Base.Or_error.try_with (fun () -> - Jsonrpc.of_string output.Async.Process.Output.stdout + with + | Error _ -> + error (fun m -> + m "%s failed and printed bad error json: %s" script_name + output.Process.Output.stdout + ) + >>= fun () -> + error (fun m -> + m "%s failed, stderr: %s" script_name output.Process.Output.stderr + ) + >>= fun () -> + fail_because "non-zero exit and bad json on stdout" + ~cause:(string_of_int code) + | Ok response -> ( + match + Base.Or_error.try_with (fun () -> Types.error_of_rpc response) + with + | Error _ -> + error (fun m -> + m "%s failed and printed bad error json: %s" script_name + output.Process.Output.stdout ) - with - | Error _ -> - error "%s succeeded but printed bad json: %s" script_name - output.Async.Process.Output.stdout ; - return - (Error - (backend_error "SCRIPT_FAILED" - [ - script_name - ; "bad json on stdout" - ; output.Async.Process.Output.stdout - ] - ) - ) - | Ok response -> - info "%s succeeded: %s" script_name - output.Async.Process.Output.stdout ; - let response = compat_out response in - let response = R.success response in - return (Ok response) - ) + >>= fun () -> + error (fun m -> + m "%s failed, stderr: %s" script_name + output.Process.Output.stderr + ) + >>= fun () -> + fail_because "non-zero exit and bad json on stdout" + ~cause:(string_of_int code) + | Ok x -> + fail (backend_backtrace_error x.code x.params x.backtrace) ) + ) + | Error (Signal signal) -> + error (fun m -> m "%s caught a signal and failed" script_name) + >>= fun () -> fail_because "signalled" ~cause:(Signal.to_string signal) + | Ok () -> ( + (* Parse the json on stdout. We get back a JSON-RPC + value from the scripts, not a complete JSON-RPC response *) + match + Base.Or_error.try_with (fun () -> + Jsonrpc.of_string output.Process.Output.stdout + ) + with + | Error _ -> + error (fun m -> + m "%s succeeded but printed bad json: %s" script_name + output.Process.Output.stdout + ) + >>= fun () -> + fail + (backend_error "SCRIPT_FAILED" + [script_name; "bad json on stdout"; output.Process.Output.stdout] + ) + | Ok response -> + info (fun m -> + m "%s succeeded: %s" script_name output.Process.Output.stdout + ) + >>= fun () -> + let response = compat_out response in + let response = R.success response in + return response + ) in let script_rpc call : - (R.response, Storage_interface.Errors.error) Deferred.Result.t = - info "%s" (Jsonrpc.string_of_call call) ; + (R.response, Storage_interface.Errors.error) Lwt_result.t = + info (fun m -> m "%s" (Jsonrpc.string_of_call call)) >>= fun () -> Script.path ~script_dir ~script_name:call.R.name >>= function | Error (`missing path) -> ( - error "%s is not a file" path ; + error (fun m -> m "%s is not a file" path) >>= fun () -> match missing with | None -> - return - (Error - (backend_error "SCRIPT_MISSING" - [ - path - ; "Check whether the file exists and has correct \ - permissions" - ] - ) + fail + (backend_error "SCRIPT_MISSING" + [ + path + ; "Check whether the file exists and has correct permissions" + ] ) | Some m -> - warn - "Deprecated: script '%s' is missing, treating as no-op. Update \ - your plugin!" - path ; - return (Ok (R.success m)) + warn (fun m -> + m + "Deprecated: script '%s' is missing, treating as no-op. \ + Update your plugin!" 
+ path + ) + >>= fun () -> return (R.success m) ) | Error (`not_executable (path, exn)) -> - error "%s is not executable" path ; - return - (Error - (backend_error "SCRIPT_NOT_EXECUTABLE" - [path; Base.Exn.to_string exn] - ) - ) + error (fun m -> m "%s is not executable" path) >>= fun () -> + fail + (backend_error "SCRIPT_NOT_EXECUTABLE" [path; Base.Exn.to_string exn]) | Ok path -> invoke_script call path in @@ -596,12 +843,12 @@ let fork_exec_rpc : to unmarshal that error. Therefore we either return a successful RPC response, or raise Fork_exec_error with a suitable SMAPIv2 error if the call failed. *) - let rpc : R.call -> R.response Deferred.t = + let rpc : R.call -> R.response Lwt.t = fun call -> script_rpc call >>= fun result -> Base.Result.map_error ~f:(fun e -> Fork_exec_error e) result |> Base.Result.ok_exn - |> return + |> Lwt.return in rpc @@ -626,39 +873,39 @@ module Attached_SRs = struct Base.Hashtbl.set !sr_table ~key ~data:{sr= plugin; uids} ; ( match !state_path with | None -> - return () + Lwt.return_unit | Some path -> let contents = Core.String.Table.sexp_of_t sexp_of_state !sr_table |> Sexplib.Sexp.to_string in let dir = Filename.dirname path in - Async_unix.Unix.mkdir dir >>= fun () -> Async.Writer.save path ~contents + Sys.mkdir_p dir >>= fun () -> Sys.save path ~contents ) - >>= fun () -> return (Ok ()) + >>= fun () -> return () let find smapiv2 = let key = Storage_interface.Sr.string_of smapiv2 in match Base.Hashtbl.find !sr_table key with | None -> let open Storage_interface in - return (Error (Errors.Sr_not_attached key)) + fail (Errors.Sr_not_attached key) | Some {sr; _} -> - return (Ok sr) + return sr let get_uids smapiv2 = let key = Storage_interface.Sr.string_of smapiv2 in match Base.Hashtbl.find !sr_table key with | None -> let open Storage_interface in - return (Error (Errors.Sr_not_attached key)) + fail (Errors.Sr_not_attached key) | Some {uids; _} -> - return (Ok uids) + return uids let remove smapiv2 = let key = Storage_interface.Sr.string_of smapiv2 in Base.Hashtbl.remove !sr_table key ; - return (Ok ()) + return () let list () = let srs = @@ -666,20 +913,20 @@ module Attached_SRs = struct ~f:(fun ~key ~data:_ ac -> Storage_interface.Sr.of_string key :: ac) ~init:[] in - return (Ok srs) + return srs let reload path = state_path := Some path ; - Async.Sys.is_file ~follow_symlinks:true path >>= function - | `No | `Unknown -> - return () - | `Yes -> - Async.Reader.file_contents path >>= fun contents -> + Sys.file_kind ~follow_symlinks:true path >>= function + | Regular -> + Sys.read_file_contents path >>= fun contents -> sr_table := contents |> Sexplib.Sexp.of_string |> Core.String.Table.t_of_sexp state_of_sexp ; - return () + Lwt.return_unit + | _ -> + Lwt.return_unit end module Datapath_plugins = struct @@ -687,33 +934,36 @@ module Datapath_plugins = struct let register ~datapath_root datapath_plugin_name = let result = - let script_dir = Filename.concat datapath_root datapath_plugin_name in + let script_dir = datapath_root // datapath_plugin_name in return_plugin_rpc (fun () -> Plugin_client.query (fork_exec_rpc ~script_dir) "register" ) >>>= fun response -> check_plugin_version_compatible response >>= function | Ok () -> - info "Registered datapath plugin %s" datapath_plugin_name ; + info (fun m -> m "Registered datapath plugin %s" datapath_plugin_name) + >>= fun () -> Base.Hashtbl.set table ~key:datapath_plugin_name ~data:(script_dir, response) ; - return (Ok ()) + return () | Error e -> let err_msg = Storage_interface.(rpc_of Errors.error) e |> 
Jsonrpc.to_string in - info "Failed to register datapath plugin %s: %s" datapath_plugin_name - err_msg ; - return (Error e) + info (fun m -> + m "Failed to register datapath plugin %s: %s" datapath_plugin_name + err_msg + ) + >>= fun () -> fail e in (* We just do not register the plugin if we've encountered any error. In the future we might want to change that, so we keep the error result above. *) - result >>= fun _ -> return () + result >>= fun _ -> Lwt.return_unit let unregister datapath_plugin_name = Base.Hashtbl.remove table datapath_plugin_name ; - return () + Lwt.return_unit let supports_feature scheme feature = match Base.Hashtbl.find table scheme with @@ -797,15 +1047,15 @@ let choose_datapath ?(persistent = true) domain response = in match preference_order with | [] -> - return (Error (missing_uri ())) + fail (missing_uri ()) | (script_dir, scheme, u) :: _us -> - return (Ok (fork_exec_rpc ~script_dir, scheme, u, domain)) + return (fork_exec_rpc ~script_dir, scheme, u, domain) (* Bind the implementations *) let bind ~volume_script_dir = (* Each plugin has its own version, see the call to listen where `process` is partially applied. *) - let module S = Storage_interface.StorageAPI (Rpc_async.GenServer ()) in + let module S = Storage_interface.StorageAPI (Rpc_lwt.GenServer ()) in let version = ref None in let volume_rpc = fork_exec_rpc ~script_dir:volume_script_dir in let module Compat = Compat (struct let version = version end) in @@ -851,35 +1101,32 @@ let bind ~volume_script_dir = ) in let update_keys ~dbg ~sr ~key ~value response = - let open Deferred.Result.Monad_infix in match value with | None -> - Deferred.Result.return response + return response | Some value -> set ~dbg ~sr ~vdi:response.Xapi_storage.Control.key ~key ~value - >>= fun () -> - Deferred.Result.return - {response with keys= (key, value) :: response.keys} + >>>= fun () -> + return {response with keys= (key, value) :: response.keys} in let vdi_attach_common dbg sr vdi domain = - let open Deferred.Result.Monad_infix in - Attached_SRs.find sr >>= fun sr -> + Attached_SRs.find sr >>>= fun sr -> (* Discover the URIs using Volume.stat *) - stat ~dbg ~sr ~vdi >>= fun response -> + stat ~dbg ~sr ~vdi >>>= fun response -> (* If we have a clone-on-boot volume then use that instead *) ( match List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - return (Ok response) + return response | Some temporary -> stat ~dbg ~sr ~vdi:temporary ) - >>= fun response -> - choose_datapath domain response >>= fun (rpc, _datapath, uri, domain) -> + >>>= fun response -> + choose_datapath domain response >>>= fun (rpc, _datapath, uri, domain) -> return_data_rpc (fun () -> Datapath_client.attach (rpc ~dbg) dbg uri domain) in - let wrap th = Rpc_async.T.put th in + let wrap th = Rpc_lwt.T.put th in (* the actual API call for this plugin, sharing same version ref across all calls *) let query_impl dbg = let th = @@ -900,7 +1147,7 @@ let bind ~volume_script_dir = (* Look for executable scripts and automatically add capabilities *) let rec loop acc = function | [] -> - return (Ok acc) + return acc | (script_name, capability) :: rest -> ( Script.path ~script_dir:volume_script_dir ~script_name >>= function | Error _ -> @@ -947,7 +1194,7 @@ let bind ~volume_script_dir = features in let name = response.Xapi_storage.Plugin.name in - Deferred.Result.return + return { Storage_interface.driver= response.Xapi_storage.Plugin.plugin ; name @@ -967,11 +1214,10 @@ let bind ~volume_script_dir = S.Query.query query_impl ; let 
query_diagnostics_impl dbg = let th = - let open Deferred.Result.Monad_infix in return_plugin_rpc (fun () -> Plugin_client.diagnostics (volume_rpc ~dbg) dbg ) - >>= fun response -> Deferred.Result.return response + >>>= fun response -> return response in wrap th in @@ -991,7 +1237,7 @@ let bind ~volume_script_dir = >>>= fun stat -> let rec loop acc = function | [] -> - return acc + Lwt.return acc | datasource :: datasources -> ( let uri = Uri.of_string datasource in match Uri.scheme uri with @@ -1005,7 +1251,7 @@ let bind ~volume_script_dir = in RRD.Client.Plugin.Local.register RRD.rpc uid Rrd.Five_Seconds Rrd_interface.V2 - |> Rpc_async.T.get + |> Rpc_lwt.T.get >>= function | Ok _ -> loop (uid :: acc) datasources @@ -1018,8 +1264,7 @@ let bind ~volume_script_dir = in loop [] stat.Xapi_storage.Control.datasources >>= fun uids -> (* associate the 'sr' from the plugin with the SR reference passed in *) - Attached_SRs.add sr attach_response uids >>>= fun () -> - Deferred.Result.return () + Attached_SRs.add sr attach_response uids >>>= fun () -> return () in wrap th in @@ -1029,7 +1274,7 @@ let bind ~volume_script_dir = Attached_SRs.find sr >>= function | Error _ -> (* ensure SR.detach is idempotent *) - Deferred.Result.return () + return () | Ok sr' -> return_volume_rpc (fun () -> Sr_client.detach (volume_rpc ~dbg) dbg sr' @@ -1038,7 +1283,7 @@ let bind ~volume_script_dir = Attached_SRs.get_uids sr >>>= fun uids -> let rec loop = function | [] -> - return () + Lwt.return_unit | datasource :: datasources -> ( let uri = Uri.of_string datasource in match Uri.scheme uri with @@ -1051,7 +1296,7 @@ let bind ~volume_script_dir = uid in RRD.Client.Plugin.Local.deregister RRD.rpc uid - |> Rpc_async.T.get + |> Rpc_lwt.T.get >>= function | Ok _ -> loop datasources @@ -1063,8 +1308,7 @@ let bind ~volume_script_dir = ) in loop uids >>= fun () -> - let open Deferred.Result.Monad_infix in - Attached_SRs.remove sr >>= fun () -> Deferred.Result.return response + Attached_SRs.remove sr >>>= fun () -> return response in wrap th in @@ -1085,7 +1329,6 @@ let bind ~volume_script_dir = List.assoc_opt "sr_uuid" probe_result.Xapi_storage.Control.configuration in - let open Deferred.Or_error in let smapiv2_probe ?sr_info () = { Storage_interface.configuration= probe_result.configuration @@ -1101,7 +1344,8 @@ let bind ~volume_script_dir = ) with | _, false, Some _uuid -> - errorf "A configuration with a uuid cannot be incomplete: %a" + Deferred.errorf + "A configuration with a uuid cannot be incomplete: %a" pp_probe_result probe_result | Some sr_stat, true, Some _uuid -> let sr_info = @@ -1128,20 +1372,20 @@ let bind ~volume_script_dir = in return (smapiv2_probe ~sr_info ()) | Some _sr, _, None -> - errorf "A configuration is not attachable without a uuid: %a" + Deferred.errorf + "A configuration is not attachable without a uuid: %a" pp_probe_result probe_result | None, false, None -> return (smapiv2_probe ()) | None, true, _ -> return (smapiv2_probe ()) ) - |> Deferred.Or_error.combine_errors - |> Deferred.Result.map_error ~f:(fun err -> + |> Deferred.combine_errors + |> Lwt_result.map_error (fun err -> backend_error "SCRIPT_FAILED" ["SR.probe"; Base.Error.to_string_hum err] ) - >>>= fun results -> - Deferred.Result.return (Storage_interface.Probe results) + >>>= fun results -> return (Storage_interface.Probe results) in wrap th in @@ -1156,7 +1400,7 @@ let bind ~volume_script_dir = (volume_rpc ~dbg ~compat_in ~compat_out) dbg uuid device_config name_label description ) - >>>= fun new_device_config -> 
Deferred.Result.return new_device_config + >>>= fun new_device_config -> return new_device_config in wrap th in @@ -1224,7 +1468,7 @@ let bind ~volume_script_dir = ) response in - Deferred.Result.return (List.map vdi_of_volume response) + return (List.map vdi_of_volume response) ) |> wrap in @@ -1234,7 +1478,7 @@ let bind ~volume_script_dir = let get_sr_info sr = return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) >>>= fun response -> - Deferred.Result.return + return { Storage_interface.sr_uuid= response.Xapi_storage.Control.uuid ; name_label= response.Xapi_storage.Control.name @@ -1284,24 +1528,33 @@ let bind ~volume_script_dir = (fun x -> not (Base.Set.mem transients x.Xapi_storage.Control.key)) response in - Deferred.Result.return (List.map vdi_of_volume response, sr_info) + return (List.map vdi_of_volume response, sr_info) in let rec stat_with_retry ?(times = 3) sr = get_sr_info sr >>>= fun sr_info -> match sr_info.health with | Healthy -> - debug "%s sr %s is healthy" __FUNCTION__ sr_uuid ; + let* () = + debug (fun m -> m "%s sr %s is healthy" __FUNCTION__ sr_uuid) + in get_volume_info sr sr_info | Unreachable when times > 0 -> - debug "%s: sr %s is unreachable, remaining %d retries" __FUNCTION__ - sr_uuid times ; - Async.Clock.after Core.Time.Span.second >>= fun () -> + let* () = + debug (fun m -> + m "%s: sr %s is unreachable, remaining %d retries" __FUNCTION__ + sr_uuid times + ) + in + Clock.after ~seconds:1. >>= fun () -> stat_with_retry ~times:(times - 1) sr | health -> - debug "%s: sr unhealthy because it is %s" __FUNCTION__ - (Storage_interface.show_sr_health health) ; - Deferred.Result.fail - Storage_interface.(Errors.Sr_unhealthy (sr_uuid, health)) + let* () = + debug (fun m -> + m "%s: sr unhealthy because it is %s" __FUNCTION__ + (Storage_interface.show_sr_health health) + ) + in + fail Storage_interface.(Errors.Sr_unhealthy (sr_uuid, health)) in Attached_SRs.find sr >>>= stat_with_retry |> wrap in @@ -1318,7 +1571,7 @@ let bind ~volume_script_dir = ) >>>= update_keys ~dbg ~sr ~key:_vdi_type_key ~value:(match vdi_info.ty with "" -> None | s -> Some s) - >>>= fun response -> Deferred.Result.return (vdi_of_volume response) + >>>= fun response -> return (vdi_of_volume response) ) |> wrap in @@ -1332,7 +1585,7 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - return (Ok ()) + return () | Some _temporary -> (* Destroy the temporary disk we made earlier *) destroy ~dbg ~sr ~vdi @@ -1370,7 +1623,7 @@ let bind ~volume_script_dir = ; snapshot_of= Storage_interface.Vdi.of_string vdi } in - Deferred.Result.return response + return response ) |> wrap in @@ -1381,7 +1634,7 @@ let bind ~volume_script_dir = clone ~dbg ~sr ~vdi: (Storage_interface.Vdi.string_of vdi_info.Storage_interface.vdi) - >>>= fun response -> Deferred.Result.return (vdi_of_volume response) + >>>= fun response -> return (vdi_of_volume response) ) |> wrap in @@ -1416,7 +1669,7 @@ let bind ~volume_script_dir = >>>= fun () -> (* Now call Volume.stat to discover the size *) stat ~dbg ~sr ~vdi >>>= fun response -> - Deferred.Result.return response.Xapi_storage.Control.virtual_size + return response.Xapi_storage.Control.virtual_size ) |> wrap in @@ -1424,8 +1677,7 @@ let bind ~volume_script_dir = let vdi_stat_impl dbg sr vdi' = (let vdi = Storage_interface.Vdi.string_of vdi' in Attached_SRs.find sr >>>= fun sr -> - stat ~dbg ~sr ~vdi >>>= fun response -> - Deferred.Result.return (vdi_of_volume response) + stat ~dbg ~sr ~vdi >>>= fun 
response -> return (vdi_of_volume response) ) |> wrap in @@ -1435,7 +1687,7 @@ let bind ~volume_script_dir = >>>= (fun sr -> let vdi = location in stat ~dbg ~sr ~vdi >>>= fun response -> - Deferred.Result.return (vdi_of_volume response) + return (vdi_of_volume response) ) |> wrap in @@ -1454,7 +1706,7 @@ let bind ~volume_script_dir = | Nbd {uri} -> Nbd {uri} in - Deferred.Result.return + return { Storage_interface.implementations= List.map convert_implementation @@ -1475,7 +1727,7 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - return (Ok response) + return response | Some temporary -> stat ~dbg ~sr ~vdi:temporary ) @@ -1509,7 +1761,7 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - return (Ok response) + return response | Some temporary -> stat ~dbg ~sr ~vdi:temporary ) @@ -1532,7 +1784,7 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - return (Ok response) + return response | Some temporary -> stat ~dbg ~sr ~vdi:temporary ) @@ -1548,7 +1800,7 @@ let bind ~volume_script_dir = >>>= (fun sr -> return_volume_rpc (fun () -> Sr_client.stat (volume_rpc ~dbg) dbg sr) >>>= fun response -> - Deferred.Result.return + return { Storage_interface.sr_uuid= response.Xapi_storage.Control.uuid ; name_label= response.Xapi_storage.Control.name @@ -1595,7 +1847,7 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - Deferred.Result.return () + return () | Some temporary -> (* Destroy the temporary disk we made earlier *) destroy ~dbg ~sr ~vdi:temporary @@ -1605,7 +1857,7 @@ let bind ~volume_script_dir = set ~dbg ~sr ~vdi ~key:_clone_on_boot_key ~value:vdi'.Xapi_storage.Control.key else - Deferred.Result.return () + return () ) |> wrap in @@ -1624,19 +1876,16 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - Deferred.Result.return () + return () | Some temporary -> (* Destroy the temporary disk we made earlier *) destroy ~dbg ~sr ~vdi:temporary >>>= fun () -> - unset ~dbg ~sr ~vdi ~key:_clone_on_boot_key >>>= fun () -> - Deferred.Result.return () + unset ~dbg ~sr ~vdi ~key:_clone_on_boot_key >>>= fun () -> return () ) |> wrap in S.VDI.epoch_end vdi_epoch_end_impl ; - let vdi_set_persistent_impl _dbg _sr _vdi _persistent = - Deferred.Result.return () |> wrap - in + let vdi_set_persistent_impl _dbg _sr _vdi _persistent = return () |> wrap in S.VDI.set_persistent vdi_set_persistent_impl ; let dp_destroy2 dbg _dp sr vdi' vm' _allow_leak = (let vdi = Storage_interface.Vdi.string_of vdi' in @@ -1648,7 +1897,7 @@ let bind ~volume_script_dir = List.assoc_opt _clone_on_boot_key response.Xapi_storage.Control.keys with | None -> - return (Ok response) + return response | Some temporary -> stat ~dbg ~sr ~vdi:temporary ) @@ -1664,12 +1913,12 @@ let bind ~volume_script_dir = in S.DP.destroy2 dp_destroy2 ; let sr_list _dbg = - Attached_SRs.list () >>>= (fun srs -> Deferred.Result.return srs) |> wrap + Attached_SRs.list () >>>= (fun srs -> return srs) |> wrap in S.SR.list sr_list ; (* SR.reset is a no op in SMAPIv3 *) - S.SR.reset (fun _ _ -> Deferred.Result.return () |> wrap) ; - let ( let* ) = ( >>>= ) in + S.SR.reset (fun _ _ -> return () |> wrap) ; + let ( let* ) = Lwt_result.bind in let vdi_enable_cbt_impl dbg sr vdi = wrap @@ @@ -1695,7 +1944,7 @@ let bind ~volume_script_dir = @@ 
let* sr = Attached_SRs.find sr in let vdi, vdi' = Storage_interface.Vdi.(string_of vdi, string_of vdi') in - let ( let* ) = ( >>= ) in + let ( let* ) = Lwt.bind in let* result = return_volume_rpc (fun () -> (* Negative lengths indicate that we want the full length. *) @@ -1704,7 +1953,7 @@ let bind ~volume_script_dir = ) in let proj_bitmap r = r.Xapi_storage.Control.bitmap in - return (Result.map proj_bitmap result) + Lwt.return (Result.map proj_bitmap result) in S.VDI.list_changed_blocks vdi_list_changed_blocks_impl ; let vdi_data_destroy_impl dbg sr vdi = @@ -1712,13 +1961,12 @@ let bind ~volume_script_dir = @@ let* sr = Attached_SRs.find sr in let vdi = Storage_interface.Vdi.string_of vdi in - let* response = + let* () = return_volume_rpc (fun () -> Volume_client.data_destroy (volume_rpc ~dbg) dbg sr vdi ) in - let* () = set ~dbg ~sr ~vdi ~key:_vdi_type_key ~value:"cbt_metadata" in - Deferred.Result.return response + set ~dbg ~sr ~vdi ~key:_vdi_type_key ~value:"cbt_metadata" in S.VDI.data_destroy vdi_data_destroy_impl ; let u name _ = failwith ("Unimplemented: " ^ name) in @@ -1755,12 +2003,19 @@ let bind ~volume_script_dir = S.DATA.MIRROR.receive_cancel (u "DATA.MIRROR.receive_cancel") ; S.SR.update_snapshot_info_src (u "SR.update_snapshot_info_src") ; S.DATA.MIRROR.stop (u "DATA.MIRROR.stop") ; - Rpc_async.server S.implementation + Rpc_lwt.server S.implementation let process_smapiv2_requests server txt = let request = Jsonrpc.call_of_string txt in - server request >>= fun response -> - Deferred.return (Jsonrpc.string_of_response response) + let to_err e = + Storage_interface.(rpc_of Errors.error Errors.(Internal_error e)) + in + Lwt.try_bind + (fun () -> server request) + (fun response -> Lwt.return (Jsonrpc.string_of_response response)) + (fun exn -> + Printexc.to_string exn |> to_err |> Jsonrpc.to_string |> Lwt.return + ) (** Active servers, one per sub-directory of the volume_root_dir *) let servers = Base.Hashtbl.create ~size:4 (module Base.String) @@ -1786,15 +2041,41 @@ let rec diff a b = (* default false due to bugs in SMAPIv3 plugins, once they are fixed this should be set to true *) let concurrent = ref false +type action_file = Create of string | Delete of string + +type action_dir = Files of action_file list | Sync | Nothing + +let actions_from events = + List.fold_left + (fun acc event -> + match (event, acc) with + | FileWatcher.Queue_overflow, _ -> + Sync + | _, Sync -> + Sync + | (Moved (Away path) | Unlinked path), Nothing -> + Files [Delete path] + | (Moved (Away path) | Unlinked path), Files files -> + Files (Delete path :: files) + | (Moved (Into path) | Created path), Nothing -> + Files [Create path] + | (Moved (Into path) | Created path), Files files -> + Files (Create path :: files) + | Modified path, Nothing -> + Files [Create path; Delete path] + | Modified path, Files files -> + Files (Create path :: Delete path :: files) + ) + Nothing events -let watch_volume_plugins ~volume_root ~switch_path ~pipe = +let watch_volume_plugins ~volume_root ~switch_path ~pipe () = let create volume_plugin_name = if Base.Hashtbl.mem servers volume_plugin_name then - return () - else ( - info "Adding %s" volume_plugin_name ; - let volume_script_dir = Filename.concat volume_root volume_plugin_name in - Message_switch_async.Protocol_async.Server.( + Lwt.return_unit + else + info (fun m -> m "Adding %s" volume_plugin_name) >>= fun () -> + let volume_script_dir = volume_root // volume_plugin_name in + Message_switch_lwt.Protocol_lwt.Server.( if !concurrent then listen_p else listen ) 
~process:(process_smapiv2_requests (bind ~volume_script_dir)) @@ -1804,82 +2085,74 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe = >>= fun result -> let server = get_ok result in Base.Hashtbl.add_exn servers ~key:volume_plugin_name ~data:server ; - return () - ) + Lwt.return_unit in let destroy volume_plugin_name = - info "Removing %s" volume_plugin_name ; + info (fun m -> m "Removing %s" volume_plugin_name) >>= fun () -> match Base.Hashtbl.find servers volume_plugin_name with | Some t -> - Message_switch_async.Protocol_async.Server.shutdown ~t () >>= fun () -> + Message_switch_lwt.Protocol_lwt.Server.shutdown ~t () >>= fun () -> Base.Hashtbl.remove servers volume_plugin_name ; - return () + Lwt.return_unit | None -> - return () + Lwt.return_unit in let sync () = - Async.Sys.readdir volume_root >>= fun names -> - let needed : string list = Array.to_list names in + Sys.readdir volume_root >>= fun needed -> let got_already : string list = Base.Hashtbl.keys servers in - Deferred.all_unit (List.map create (diff needed got_already)) >>= fun () -> - Deferred.all_unit (List.map destroy (diff got_already needed)) + Lwt.join (List.map create (diff needed got_already)) >>= fun () -> + Lwt.join (List.map destroy (diff got_already needed)) in sync () >>= fun () -> - let open Async_inotify.Event in + let resolve_file = function + | Create path -> + create (Filename.basename path) + | Delete path -> + destroy (Filename.basename path) + in + let resolve = function + | Sync -> + sync () + | Nothing -> + Lwt.return_unit + | Files files -> + Lwt_list.iter_s resolve_file (List.rev files) + in let rec loop () = - (Async.Pipe.read pipe >>= function - | `Eof -> - info "Received EOF from inotify event pipe" ; - Async.Shutdown.exit 1 - | `Ok (Created path) | `Ok (Moved (Into path)) -> - create (Filename.basename path) - | `Ok (Unlinked path) | `Ok (Moved (Away path)) -> - destroy (Filename.basename path) - | `Ok (Modified _) -> - return () - | `Ok (Moved (Move (path_a, path_b))) -> - destroy (Filename.basename path_a) >>= fun () -> - create (Filename.basename path_b) - | `Ok Queue_overflow -> - sync () - ) + (FileWatcher.read pipe >>= fun events -> resolve (actions_from events)) >>= fun () -> loop () in loop () -let watch_datapath_plugins ~datapath_root ~pipe = +let watch_datapath_plugins ~datapath_root ~pipe () = let sync () = - Async.Sys.readdir datapath_root >>= fun names -> - let needed : string list = Array.to_list names in + Sys.readdir datapath_root >>= fun needed -> let got_already : string list = Base.Hashtbl.keys servers in - Deferred.all_unit + Lwt.join (List.map (Datapath_plugins.register ~datapath_root) (diff needed got_already) ) >>= fun () -> - Deferred.all_unit - (List.map Datapath_plugins.unregister (diff got_already needed)) + Lwt.join (List.map Datapath_plugins.unregister (diff got_already needed)) in sync () >>= fun () -> - let open Async_inotify.Event in + let resolve_file = function + | Create path -> + Datapath_plugins.register ~datapath_root (Filename.basename path) + | Delete path -> + Datapath_plugins.unregister (Filename.basename path) + in + let resolve = function + | Sync -> + sync () + | Nothing -> + Lwt.return_unit + | Files files -> + Lwt_list.iter_s resolve_file (List.rev files) + in let rec loop () = - (Async.Pipe.read pipe >>= function - | `Eof -> - info "Received EOF from inotify event pipe" ; - Async.Shutdown.exit 1 - | `Ok (Created path) | `Ok (Moved (Into path)) -> - Datapath_plugins.register ~datapath_root (Filename.basename path) - | `Ok (Unlinked path) | `Ok 
(Moved (Away path)) -> - Datapath_plugins.unregister (Filename.basename path) - | `Ok (Modified _) -> - return () - | `Ok (Moved (Move (path_a, path_b))) -> - Datapath_plugins.unregister (Filename.basename path_a) >>= fun () -> - Datapath_plugins.register ~datapath_root (Filename.basename path_b) - | `Ok Queue_overflow -> - sync () - ) + (FileWatcher.read pipe >>= fun events -> resolve (actions_from events)) >>= fun () -> loop () in loop () @@ -1889,13 +2162,13 @@ let self_test_plugin ~root_dir plugin = let process = process_smapiv2_requests (bind ~volume_script_dir) in let rpc call = call |> Jsonrpc.string_of_call |> process >>= fun r -> - debug "RPC: %s" r ; - return (Jsonrpc.response_of_string r) + debug (fun m -> m "RPC: %s" r) >>= fun () -> + Lwt.return (Jsonrpc.response_of_string r) in - let module Test = Storage_interface.StorageAPI (Rpc_async.GenClient ()) in + let module Test = Storage_interface.StorageAPI (Rpc_lwt.GenClient ()) in let dbg = "debug" in - Async.Monitor.try_with (fun () -> - let open Rpc_async.ErrM in + Deferred.try_with (fun () -> + let open Rpc_lwt.ErrM in Test.Query.query rpc dbg >>= (fun query_result -> Test.Query.diagnostics rpc dbg >>= fun _msg -> @@ -1942,49 +2215,47 @@ let self_test_plugin ~root_dir plugin = else return () ) - |> Rpc_async.T.get + |> Rpc_lwt.T.get ) >>= function | Ok x -> - Async_kernel.Deferred.return x - | Error _y -> - failwith "self test failed" + Lwt.return x + | Error e -> + failwith (Printf.sprintf "self test failed with %s" (Printexc.to_string e)) let self_test ~root_dir = self_test_plugin ~root_dir "org.xen.xapi.storage.dummyv5" >>= function | Ok () -> - info "test thread shutdown cleanly" ; - Async_unix.exit 0 + info (fun m -> m "test thread shutdown cleanly") >>= fun () -> exit 0 | Error x -> - error "test thread failed with %s" - (Storage_interface.(rpc_of Errors.error) x |> Jsonrpc.to_string) ; - Async_unix.exit 2 + error (fun m -> + m "test thread failed with %s" + (Storage_interface.(rpc_of Errors.error) x |> Jsonrpc.to_string) + ) + >>= fun () -> exit 2 let main ~root_dir ~state_path ~switch_path = Attached_SRs.reload state_path >>= fun () -> - let datapath_root = Filename.concat root_dir "datapath" in - Async_inotify.create ~recursive:false ~watch_new_dirs:false datapath_root - >>= fun (_, _, datapath) -> - let volume_root = Filename.concat root_dir "volume" in - Async_inotify.create ~recursive:false ~watch_new_dirs:false volume_root - >>= fun (_, _, volume) -> - let rec loop () = - Async.Monitor.try_with (fun () -> - Deferred.all_unit - [ - watch_volume_plugins ~volume_root ~switch_path ~pipe:volume - ; watch_datapath_plugins ~datapath_root ~pipe:datapath - ] - ) - >>= function + let datapath_root = root_dir // "datapath" in + FileWatcher.create datapath_root >>= fun datapath -> + let volume_root = root_dir // "volume" in + FileWatcher.create volume_root >>= fun volume -> + let rec retry_loop ((name, promise) as thread) () = + Deferred.try_with promise >>= function | Ok () -> - info "main thread shutdown cleanly" ; - return () + Lwt.return_unit | Error x -> - error "main thread failed with %s" (Base.Exn.to_string x) ; - Async.Clock.after (Core.Time.Span.of_sec 5.) >>= fun () -> loop () + error (fun m -> m "%s thread failed with %s" name (Base.Exn.to_string x)) + >>= fun () -> Clock.after ~seconds:5. 
>>= retry_loop thread in - loop () + [ + ( "volume plugins" + , watch_volume_plugins ~volume_root ~switch_path ~pipe:volume + ) + ; ("datapath plugins", watch_datapath_plugins ~datapath_root ~pipe:datapath) + ] + |> List.map (fun thread -> retry_loop thread ()) + |> Lwt.join open Xcp_service @@ -2014,7 +2285,7 @@ let register_exn_pretty_printers () = assert false ) -let _ = +let () = register_exn_pretty_printers () ; let root_dir = ref "/var/lib/xapi/storage-scripts" in let state_path = ref "/var/run/nonpersistent/xapi-storage-script/state.db" in @@ -2027,7 +2298,7 @@ let _ = scripts, one sub-directory per queue name" ; essential= true ; path= root_dir - ; perms= [U.X_OK] + ; perms= [Unix.X_OK] } ; { Xcp_service.name= "state" @@ -2057,27 +2328,16 @@ let _ = in configure2 ~name:"xapi-script-storage" ~version:Xapi_version.version ~doc:description ~resources ~options () ; - let run () = - let ( let* ) = ( >>= ) in + + Logs.set_reporter (lwt_reporter ()) ; + Logs.set_level ~all:true (Some Logs.Info) ; + let main = let* observer_enabled = observer_is_component_enabled () in config.use_observer <- observer_enabled ; - let rec loop () = - Async_kernel.Monitor.try_with (fun () -> - if !self_test_only then - self_test ~root_dir:!root_dir - else - main ~root_dir:!root_dir ~state_path:!state_path - ~switch_path:!Xcp_client.switch_path - ) - >>= function - | Ok () -> - info "main thread shutdown cleanly" ; - return () - | Error x -> - error "main thread failed with %s" (Base.Exn.to_string x) ; - Async.Clock.after (Core.Time.Span.of_sec 5.) >>= fun () -> loop () - in - loop () + if !self_test_only then + self_test ~root_dir:!root_dir + else + main ~root_dir:!root_dir ~state_path:!state_path + ~switch_path:!Xcp_client.switch_path in - ignore (run ()) ; - Core.never_returns (Async.Scheduler.go ()) + Lwt_main.run main From b99f1846910eb3e62b4a7c81c544602102a485de Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 21 Nov 2023 13:40:17 +0000 Subject: [PATCH 070/141] IH-397: remove all core usages from xapi-storage-script Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/dune | 3 +-- ocaml/xapi-storage-script/main.ml | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index 50af6ffcb18..9670539ee30 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -2,8 +2,7 @@ (name main) (libraries base - core - + fmt inotify inotify.lwt diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index c8790cbb546..59341a61f4e 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -876,7 +876,7 @@ module Attached_SRs = struct Lwt.return_unit | Some path -> let contents = - Core.String.Table.sexp_of_t sexp_of_state !sr_table + Base.Hashtbl.sexp_of_t sexp_of_string sexp_of_state !sr_table |> Sexplib.Sexp.to_string in let dir = Filename.dirname path in @@ -923,7 +923,7 @@ module Attached_SRs = struct sr_table := contents |> Sexplib.Sexp.of_string - |> Core.String.Table.t_of_sexp state_of_sexp ; + |> Base.Hashtbl.Poly.t_of_sexp string_of_sexp state_of_sexp ; Lwt.return_unit | _ -> Lwt.return_unit From 667c4162f3174fad3fa1f61977b96b7bade23002 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Mon, 18 Dec 2023 17:55:02 +0000 Subject: [PATCH 071/141] xapi-storage-script: Change directory creation Avoid blowing up the stack when creating directories recursively. 
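A minimal sketch of the approach, in plain synchronous Unix calls rather than the Lwt wrappers this file actually uses (an illustration only, not the code in this patch):

```
(* Simplified sketch: try the leaf directory first, treat EEXIST as
   success, and only walk up towards the root when the parent is
   missing. Not the patch's code, which uses Lwt_unix instead. *)
let rec mkdir_p ?(perm = 0o755) path =
  try Unix.mkdir path perm with
  | Unix.Unix_error (Unix.EEXIST, _, _) ->
      () (* already exists, possibly created concurrently *)
  | Unix.Unix_error (Unix.ENOENT, _, _) ->
      mkdir_p ~perm (Filename.dirname path) ;
      (* the parent now exists, so retry the leaf *)
      mkdir_p ~perm path
```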
Now an optimistic approach is used: create the directory, and if if cannot be created try to create the parent. This avoid races in creation, but causes using 2 calls per directory created for all the directories that need to be created aside from the top-most. Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/main.ml | 34 ++++++++++++++++++------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 59341a61f4e..fe7d4bbbd15 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -104,20 +104,26 @@ module Sys = struct List.filter (function "." | ".." -> false | _ -> true) listing |> Lwt.return - let rec mkdir_p ?(perm = 0o755) path = - file_kind ~follow_symlinks:false path >>= function - | Directory -> - Lwt.return_unit - | Regular | Other | Unknown -> - let msg = - Printf.sprintf - {|Could not create directory "%s": already exists and it's not a directory|} - path - in - Lwt.fail (Failure msg) - | Missing -> - let parent = Filename.dirname path in - mkdir_p ~perm parent >>= fun () -> Lwt_unix.mkdir path perm + let mkdir_p ?(perm = 0o755) path = + let rec loop acc path = + let create_dir () = Lwt_unix.mkdir path perm in + let create_subdirs () = Lwt_list.iter_s (fun f -> f ()) acc in + Lwt.try_bind create_dir create_subdirs (function + | Unix.(Unix_error (EEXIST, _, _)) -> + (* create directories, parents first *) + create_subdirs () + | Unix.(Unix_error (ENOENT, _, _)) -> + let parent = Filename.dirname path in + loop (create_dir :: acc) parent + | exn -> + let msg = + Printf.sprintf {|Could not create directory "%s" because: %s|} + path (Printexc.to_string exn) + in + Lwt.fail (Failure msg) + ) + in + loop [] path end module Signal = struct From db797b91ea425e5aa41093c6d6617324d68a5fce Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 15 Mar 2024 11:22:26 +0000 Subject: [PATCH 072/141] xapi-storage-script: remove custom id use Fun.id instead Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/main.ml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index fe7d4bbbd15..5d9441c18ce 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -425,8 +425,6 @@ let supported_api_versions = [pvs_version; "5.0"] let api_max = List.fold_left Base.String.max "" supported_api_versions -let id x = x - (** A function that changes the input to make it compatible with an older script *) type compat_in = R.t -> R.t @@ -527,7 +525,7 @@ end = struct return (add_param_to_input [("uri", R.String uri)]) ) | _ -> - return id + return Fun.id let sr_create device_config = compat_uri device_config >>>= fun compat_in -> @@ -543,7 +541,7 @@ end = struct rpc ) | _ -> - id + Fun.id in return (device_config, compat_in, compat_out) @@ -687,7 +685,7 @@ let fork_exec_rpc : -> ?dbg:string -> R.call -> R.response Lwt.t = - fun ~script_dir ?missing ?(compat_in = id) ?(compat_out = id) ?dbg -> + fun ~script_dir ?missing ?(compat_in = Fun.id) ?(compat_out = Fun.id) ?dbg -> let invoke_script call script_name : (R.response, Storage_interface.Errors.error) Lwt_result.t = let traceparent = Option.bind dbg Debug_info.traceparent_of_dbg in @@ -987,7 +985,7 @@ let vdi_of_volume x = | Some v -> v |> of_string in - let find_string = find ~of_string:id in + let find_string = find ~of_string:Fun.id in let open Storage_interface in { vdi= Vdi.of_string 
x.Xapi_storage.Control.key From 810fa04c0195304874571ce7fe0c449e238673d3 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 15 Mar 2024 14:00:44 +0000 Subject: [PATCH 073/141] xapi-storage-script: don't duplicate code from message-switch Both client and server have code to pretty-print errors, and it's exactly the same as what was implemented here, reuse it instead. Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/main.ml | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 5d9441c18ce..8c57a9dc759 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -575,15 +575,9 @@ module RRD = struct open Message_switch_lwt.Protocol_lwt let ( >>|= ) m f = - m >>= function - | Ok x -> - f x - | Error y -> - let b = Buffer.create 16 in - let fmt = Format.formatter_of_buffer b in - Client.pp_error fmt y ; - Format.pp_print_flush fmt () ; - raise (Failure (Buffer.contents b)) + m >>= fun x -> + Client.error_to_msg x + |> Result.fold ~ok:f ~error:(function `Msg err -> failwith err) let switch_rpc queue_name string_of_call response_of_string call = Client.connect ~switch:queue_name () >>|= fun t -> @@ -2025,15 +2019,9 @@ let process_smapiv2_requests server txt = let servers = Base.Hashtbl.create ~size:4 (module Base.String) (* XXX: need a better error-handling strategy *) -let get_ok = function - | Ok x -> - x - | Error e -> - let b = Buffer.create 16 in - let fmt = Format.formatter_of_buffer b in - Message_switch_unix.Protocol_unix.Server.pp_error fmt e ; - Format.pp_print_flush fmt () ; - failwith (Buffer.contents b) +let get_ok x = + Message_switch_unix.Protocol_unix.Server.error_to_msg x + |> Result.fold ~ok:Fun.id ~error:(function `Msg err -> failwith err) let rec diff a b = match a with From 9580a6530bdfb0702959d8e255baedab38844547 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 28 Jul 2023 15:33:02 +0100 Subject: [PATCH 074/141] IH-397: remove all dependencies on async and core Removes async-only packages and changes the opam metadata to reflect the lack of dependencies on the libraries Signed-off-by: Pau Ruiz Safont --- Makefile | 4 +- dune-project | 8 - message-switch-async.opam | 35 ---- message-switch-async.opam.template | 33 ---- message-switch.opam | 2 - message-switch.opam.template | 2 - ocaml/message-switch/async/dune | 17 -- ocaml/message-switch/async/protocol_async.ml | 141 -------------- ocaml/message-switch/async/protocol_async.mli | 23 --- .../core_test/async/client_async_main.ml | 94 ---------- ocaml/message-switch/core_test/async/dune | 21 --- .../core_test/async/server_async_main.ml | 66 ------- .../core_test/basic-rpc-test.sh | 14 +- .../core_test/concur-rpc-test.sh | 11 +- ocaml/message-switch/core_test/dune | 18 +- .../core_test/lock_test_async.ml | 13 -- ocaml/xen-api-client/async/dune | 25 --- .../async/xen_api_async_unix.ml | 134 -------------- .../async/xen_api_async_unix.mli | 28 --- ocaml/xen-api-client/async_examples/dune | 48 ----- .../async_examples/event_test.ml | 175 ------------------ .../xen-api-client/async_examples/list_vms.ml | 56 ------ quality-gate.sh | 2 +- xapi-idl.opam | 2 +- xapi-idl.opam.template | 2 +- xapi-storage-script.opam | 22 +-- xapi-storage-script.opam.template | 22 +-- xen-api-client-async.opam | 38 ---- xen-api-client-async.opam.template | 36 ---- 29 files changed, 31 insertions(+), 1061 deletions(-) delete mode 100644 message-switch-async.opam delete mode 100644 
message-switch-async.opam.template delete mode 100644 ocaml/message-switch/async/dune delete mode 100644 ocaml/message-switch/async/protocol_async.ml delete mode 100644 ocaml/message-switch/async/protocol_async.mli delete mode 100644 ocaml/message-switch/core_test/async/client_async_main.ml delete mode 100644 ocaml/message-switch/core_test/async/dune delete mode 100644 ocaml/message-switch/core_test/async/server_async_main.ml delete mode 100644 ocaml/message-switch/core_test/lock_test_async.ml delete mode 100644 ocaml/xen-api-client/async/dune delete mode 100644 ocaml/xen-api-client/async/xen_api_async_unix.ml delete mode 100644 ocaml/xen-api-client/async/xen_api_async_unix.mli delete mode 100644 ocaml/xen-api-client/async_examples/dune delete mode 100644 ocaml/xen-api-client/async_examples/event_test.ml delete mode 100644 ocaml/xen-api-client/async_examples/list_vms.ml delete mode 100644 xen-api-client-async.opam delete mode 100644 xen-api-client-async.opam.template diff --git a/Makefile b/Makefile index 53d01a4b063..7f7386bf6b1 100644 --- a/Makefile +++ b/Makefile @@ -150,9 +150,9 @@ install-extra: DUNE_IU_PACKAGES1=-j $(JOBS) --destdir=$(DESTDIR) --prefix=$(PREFIX) --libdir=$(LIBDIR) --mandir=$(MANDIR) DUNE_IU_PACKAGES1+=--libexecdir=$(XENOPSD_LIBEXECDIR) --datadir=$(SDKDIR) DUNE_IU_PACKAGES1+=xapi-client xapi-schema xapi-consts xapi-cli-protocol xapi-datamodel xapi-types -DUNE_IU_PACKAGES1+=xen-api-client xen-api-client-lwt xen-api-client-async rrdd-plugin rrd-transport +DUNE_IU_PACKAGES1+=xen-api-client xen-api-client-lwt rrdd-plugin rrd-transport DUNE_IU_PACKAGES1+=gzip http-lib pciutil sexpr stunnel uuid xml-light2 zstd xapi-compression safe-resources -DUNE_IU_PACKAGES1+=message-switch message-switch-async message-switch-cli message-switch-core message-switch-lwt +DUNE_IU_PACKAGES1+=message-switch message-switch-cli message-switch-core message-switch-lwt DUNE_IU_PACKAGES1+=message-switch-unix xapi-idl forkexec xapi-forkexecd xapi-storage xapi-storage-script xapi-storage-cli DUNE_IU_PACKAGES1+=xapi-nbd varstored-guard xapi-log xapi-open-uri xapi-tracing xapi-tracing-export xapi-expiry-alerts cohttp-posix DUNE_IU_PACKAGES1+=xapi-rrd xapi-inventory clock xapi-sdk diff --git a/dune-project b/dune-project index fc74adc8a6e..0fc69762a05 100644 --- a/dune-project +++ b/dune-project @@ -61,10 +61,6 @@ ) -(package - (name xen-api-client-async) -) - (package (name xen-api-client) (synopsis "Xen-API client library for remotely-controlling a xapi host") @@ -519,10 +515,6 @@ This package provides an Lwt compatible interface to the library.") (name pciutil) ) -(package - (name message-switch-async) -) - (package (name message-switch-lwt) ) diff --git a/message-switch-async.opam b/message-switch-async.opam deleted file mode 100644 index ac53e522c21..00000000000 --- a/message-switch-async.opam +++ /dev/null @@ -1,35 +0,0 @@ -# This file is generated by dune, edit dune-project instead -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -opam-version: "2.0" -name: "message-switch-async" -maintainer: "xen-api@lists.xen.org" -authors: [ "xen-api@lists.xen.org" ] -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -tags: [ "org:xapi-project" ] -build: [ - ["./configure" "--prefix" "%{prefix}%"] - [ "dune" "build" "-p" name "-j" jobs ] -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "odoc" {with-doc} - "async" {>= "v0.9.0"} - "async_kernel" - "async_unix" - "base" - "core" 
- "core_kernel" - "core_unix" - "cohttp-async" {>= "1.0.2"} - "message-switch-core" -] -synopsis: "A simple store-and-forward message switch" -description: """ -The switch stores messages in queues with well-known names. Clients use -a simple HTTP protocol to enqueue and dequeue messages.""" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/message-switch-async.opam.template b/message-switch-async.opam.template deleted file mode 100644 index aaa69dc257e..00000000000 --- a/message-switch-async.opam.template +++ /dev/null @@ -1,33 +0,0 @@ -opam-version: "2.0" -name: "message-switch-async" -maintainer: "xen-api@lists.xen.org" -authors: [ "xen-api@lists.xen.org" ] -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -tags: [ "org:xapi-project" ] -build: [ - ["./configure" "--prefix" "%{prefix}%"] - [ "dune" "build" "-p" name "-j" jobs ] -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "odoc" {with-doc} - "async" {>= "v0.9.0"} - "async_kernel" - "async_unix" - "base" - "core" - "core_kernel" - "core_unix" - "cohttp-async" {>= "1.0.2"} - "message-switch-core" -] -synopsis: "A simple store-and-forward message switch" -description: """ -The switch stores messages in queues with well-known names. Clients use -a simple HTTP protocol to enqueue and dequeue messages.""" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/message-switch.opam b/message-switch.opam index 4ee77fdca5d..f0dcf7ff224 100644 --- a/message-switch.opam +++ b/message-switch.opam @@ -18,11 +18,9 @@ depends: [ "dune" {>= "3.15"} "odoc" {with-doc} "cmdliner" - "cohttp-async" {with-test} "cohttp-lwt-unix" "io-page" {>= "2.4.0"} "lwt_log" - "message-switch-async" {with-test} "message-switch-lwt" "message-switch-unix" "mirage-block-unix" {>= "2.4.0"} diff --git a/message-switch.opam.template b/message-switch.opam.template index 8a898c41747..a33fe27cb3e 100644 --- a/message-switch.opam.template +++ b/message-switch.opam.template @@ -16,11 +16,9 @@ depends: [ "dune" {>= "3.15"} "odoc" {with-doc} "cmdliner" - "cohttp-async" {with-test} "cohttp-lwt-unix" "io-page" {>= "2.4.0"} "lwt_log" - "message-switch-async" {with-test} "message-switch-lwt" "message-switch-unix" "mirage-block-unix" {>= "2.4.0"} diff --git a/ocaml/message-switch/async/dune b/ocaml/message-switch/async/dune deleted file mode 100644 index 89f2c3a5ff4..00000000000 --- a/ocaml/message-switch/async/dune +++ /dev/null @@ -1,17 +0,0 @@ -(library - (name message_switch_async) - (public_name message-switch-async) - (libraries - (re_export async) - (re_export async_unix) - async_kernel - base - cohttp-async - (re_export core) - core_unix - core_kernel - core_unix.time_unix - message-switch-core - ) -) - diff --git a/ocaml/message-switch/async/protocol_async.ml b/ocaml/message-switch/async/protocol_async.ml deleted file mode 100644 index 2bc34621563..00000000000 --- a/ocaml/message-switch/async/protocol_async.ml +++ /dev/null @@ -1,141 +0,0 @@ -(* - * Copyright (c) Citrix Systems Inc. - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - *) - -let whoami () = - Printf.sprintf "%s:%d" (Filename.basename Sys.argv.(0)) (Unix.getpid ()) - -open Core -open Async - -module M = struct - let whoami = whoami - - module IO = struct - include Cohttp_async.Io - - let map f t = Deferred.map ~f t - - let iter f t = Deferred.List.iter t ~f - - let iter_dontwait f t = - Deferred.don't_wait_for @@ Deferred.List.iter ~how:`Parallel t ~f - - let any = Deferred.any - - let all = Deferred.all - - let is_determined = Deferred.is_determined - - let return_unit = Deferred.unit - end - - let connect path = - let maximum_delay = 30. in - let connect () = - let s = Socket.create Socket.Type.unix in - Monitor.try_with ~extract_exn:true (fun () -> - Socket.connect s (Socket.Address.Unix.create path) - ) - >>= function - | Ok _x -> - let fd = Socket.fd s in - let reader = Reader.create fd in - let writer = Writer.create fd in - return (fd, reader, writer) - | Error e -> - Socket.shutdown s `Both ; raise e - in - let rec retry delay = - Monitor.try_with ~extract_exn:true connect >>= function - | Error - (Unix.Unix_error - (Core_unix.(ECONNREFUSED | ECONNABORTED | ENOENT), _, _) - ) -> - let delay = Float.min maximum_delay delay in - Clock.after (Time.Span.of_sec delay) >>= fun () -> - retry (delay +. delay) - | Error e -> - raise e - | Ok (_, reader, writer) -> - return (reader, writer) - in - retry 0.5 - - let disconnect (_, writer) = Writer.close writer - - module Ivar = struct include Ivar end - - module Mutex = struct - type t = {mutable m: bool; c: unit Condition.t} - - let create () = - let m = false in - let c = Condition.create () in - {m; c} - - let with_lock t f = - let rec wait () = - if Bool.(t.m = false) then ( - t.m <- true ; - return () - ) else - Condition.wait t.c >>= wait - in - wait () >>= fun () -> - Monitor.protect f ~finally:(fun () -> - t.m <- false ; - Condition.broadcast t.c () ; - return () - ) - end - - module Condition = struct - open Async_kernel - - type 'a t = 'a Condition.t - - let create = Condition.create - - let wait = Condition.wait - - let broadcast = Condition.broadcast - - let signal = Condition.signal - end - - module Clock = struct - type timer = {cancel: unit Ivar.t} - - let run_after timeout f = - let timer = {cancel= Ivar.create ()} in - let cancelled = Ivar.read timer.cancel in - let sleep = Clock.after (Time.Span.of_sec (Float.of_int timeout)) in - let _ = - Deferred.any [cancelled; sleep] >>= fun () -> - if Deferred.is_determined cancelled then - return () - else - return (f ()) - in - timer - - let cancel t = Ivar.fill t.cancel () - end -end - -module Client = Message_switch_core.Make.Client (M) -module Server = Message_switch_core.Make.Server (M) -module Mtest = Message_switch_core.Mtest.Make (M) diff --git a/ocaml/message-switch/async/protocol_async.mli b/ocaml/message-switch/async/protocol_async.mli deleted file mode 100644 index d18b37b742c..00000000000 --- a/ocaml/message-switch/async/protocol_async.mli +++ /dev/null @@ -1,23 +0,0 @@ -(* - * Copyright (c) Citrix Systems Inc. 
- * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - *) -open Async -open Message_switch_core - -module Client : S.CLIENT with type 'a io = 'a Deferred.t - -module Server : S.SERVER with type 'a io = 'a Deferred.t - -module Mtest : Mtest.MTEST with type 'a io = 'a Deferred.t diff --git a/ocaml/message-switch/core_test/async/client_async_main.ml b/ocaml/message-switch/core_test/async/client_async_main.ml deleted file mode 100644 index daedfe59bae..00000000000 --- a/ocaml/message-switch/core_test/async/client_async_main.ml +++ /dev/null @@ -1,94 +0,0 @@ -(* - * Copyright (c) Citrix Systems Inc. - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - *) - -module P = Printf -open Core -open Async -open Message_switch_async.Protocol_async - -let path = ref "/var/run/message-switch/sock" - -let name = ref "server" - -let payload = ref "hello" - -let timeout = ref None - -let shutdown = "shutdown" - -let ( >>|= ) m f = - m >>= function - | Ok x -> - f x - | Error y -> - let b = Buffer.create 16 in - let fmt = Format.formatter_of_buffer b in - Client.pp_error fmt y ; - Format.pp_print_flush fmt () ; - raise (Failure (Buffer.contents b)) - -let main () = - Client.connect ~switch:!path () >>|= fun t -> - let counter = ref 0 in - let one () = - incr counter ; - Client.rpc ~t ~queue:!name ~body:!payload () >>|= fun _ -> return () - in - let start = Time.now () in - ( match !timeout with - | None -> - one () - | Some t -> - let rec loop () = - let sofar = Time.diff (Time.now ()) start in - if Time.Span.(sofar > of_sec t) then - return () - else - one () >>= fun () -> loop () - in - loop () - ) - >>= fun () -> - let time = Time.diff (Time.now ()) start in - P.printf "Finished %d RPCs in %.02f\n%!" 
!counter (Time.Span.to_sec time) ; - Client.rpc ~t ~queue:!name ~body:shutdown () >>|= fun _ -> Shutdown.exit 0 - -let _ = - Arg.parse - [ - ( "-path" - , Arg.Set_string path - , Printf.sprintf "path broker listens on (default %s)" !path - ) - ; ( "-name" - , Arg.Set_string name - , Printf.sprintf "name to send message to (default %s)" !name - ) - ; ( "-payload" - , Arg.Set_string payload - , Printf.sprintf "payload of message to send (default %s)" !payload - ) - ; ( "-secs" - , Arg.String (fun x -> timeout := Some (Float.of_string x)) - , Printf.sprintf - "number of seconds to repeat the same message for (default %s)" - (match !timeout with None -> "None" | Some x -> Float.to_string x) - ) - ] - (fun x -> P.fprintf stderr "Ignoring unexpected argument: %s" x) - "Send a message to a name, optionally waiting for a response" ; - let (_ : 'a Deferred.t) = main () in - never_returns (Scheduler.go ()) diff --git a/ocaml/message-switch/core_test/async/dune b/ocaml/message-switch/core_test/async/dune deleted file mode 100644 index 6e690c35e1d..00000000000 --- a/ocaml/message-switch/core_test/async/dune +++ /dev/null @@ -1,21 +0,0 @@ -(executables - (modes exe) - (names - client_async_main - server_async_main - ) - (libraries - async - async_kernel - async_unix - base - base.caml - cohttp-async - core - core_kernel - core_unix - core_unix.time_unix - message-switch-async - ) -) - diff --git a/ocaml/message-switch/core_test/async/server_async_main.ml b/ocaml/message-switch/core_test/async/server_async_main.ml deleted file mode 100644 index cd7984bec27..00000000000 --- a/ocaml/message-switch/core_test/async/server_async_main.ml +++ /dev/null @@ -1,66 +0,0 @@ -(* - * Copyright (c) Citrix Systems Inc. - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - *) - -module P = Printf -open Core -open Async -open Message_switch_async.Protocol_async - -let path = ref "/var/run/message-switch/sock" - -let name = ref "server" - -let concurrent = ref false - -let shutdown = Ivar.create () - -let process = function - | "shutdown" -> - Ivar.fill shutdown () ; return "ok" - | x -> - return x - -let main () = - let (_ : 'a Deferred.t) = - if !concurrent then - Server.listen_p ~process ~switch:!path ~queue:!name () - else - Server.listen ~process ~switch:!path ~queue:!name () - in - Ivar.read shutdown >>= fun () -> - Clock.after (Time.Span.of_sec 1.) 
>>= fun () -> exit 0 - -let _ = - Arg.parse - [ - ( "-path" - , Arg.Set_string path - , Printf.sprintf "path broker listens on (default %s)" !path - ) - ; ( "-name" - , Arg.Set_string name - , Printf.sprintf "name to send message to (default %s)" !name - ) - ; ( "-concurrent" - , Arg.Set concurrent - , Printf.sprintf "set concurrent processing of messages (default %b)" - !concurrent - ) - ] - (fun x -> P.fprintf stderr "Ignoring unexpected argument: %s" x) - "Respond to RPCs on a name" ; - let (_ : 'a Deferred.t) = main () in - never_returns (Scheduler.go ()) diff --git a/ocaml/message-switch/core_test/basic-rpc-test.sh b/ocaml/message-switch/core_test/basic-rpc-test.sh index bc281c65f45..851c972b831 100755 --- a/ocaml/message-switch/core_test/basic-rpc-test.sh +++ b/ocaml/message-switch/core_test/basic-rpc-test.sh @@ -29,16 +29,6 @@ SERVER=$! lwt/client_main.exe -path "${SPATH}" -secs "${SECS}" wait "${SERVER}" -echo Performance test of Async to Lwt +echo Performance test of Lwt to Unix lwt/server_main.exe -path "${SPATH}" & -SERVER=$! -async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" -wait "${SERVER}" - -echo Performance test of Async to Async -async/server_async_main.exe -path "${SPATH}" & -SERVER=$! -async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" -wait "${SERVER}" - -../cli/main.exe shutdown --path "${SPATH}" +./client_unix_main.exe -path "${SPATH}" -secs "${SECS}" diff --git a/ocaml/message-switch/core_test/concur-rpc-test.sh b/ocaml/message-switch/core_test/concur-rpc-test.sh index 1403946ba5b..c861516f3c0 100755 --- a/ocaml/message-switch/core_test/concur-rpc-test.sh +++ b/ocaml/message-switch/core_test/concur-rpc-test.sh @@ -29,16 +29,15 @@ SERVER=$! lwt/client_main.exe -path "${SPATH}" -secs "${SECS}" wait "${SERVER}" -echo Performance test of Async to Lwt +echo Performance test of Unix to Lwt lwt/server_main.exe -path "${SPATH}" -concurrent & SERVER=$! -async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" +./client_unix_main.exe -path "${SPATH}" -secs "${SECS}" wait "${SERVER}" - -echo Performance test of Async to Async -async/server_async_main.exe -path "${SPATH}" -concurrent & +echo Performance test of Lwt to Unix +./server_unix_main.exe -path "${SPATH}" & SERVER=$! 
-async/client_async_main.exe -path "${SPATH}" -secs "${SECS}" +lwt/client_main.exe -path "${SPATH}" -secs "${SECS}" wait "${SERVER}" ../cli/main.exe shutdown --path "${SPATH}" diff --git a/ocaml/message-switch/core_test/dune b/ocaml/message-switch/core_test/dune index cda5c5125aa..a7f0396538d 100644 --- a/ocaml/message-switch/core_test/dune +++ b/ocaml/message-switch/core_test/dune @@ -3,33 +3,21 @@ (names client_unix_main server_unix_main - lock_test_async lock_test_lwt ) (modules client_unix_main - server_unix_main - lock_test_async + server_unix_main lock_test_lwt ) (libraries message-switch-unix message-switch-core - message-switch-async message-switch-lwt threads.posix ) ) -(rule - (alias runtest) - (deps - lock_test_async.exe - ) - (action (run ./lock_test_async.exe)) - (package message-switch) -) - (rule (alias runtest) (deps @@ -45,8 +33,6 @@ (deps client_unix_main.exe server_unix_main.exe - async/client_async_main.exe - async/server_async_main.exe lwt/client_main.exe lwt/server_main.exe lwt/link_test_main.exe @@ -80,8 +66,6 @@ (deps client_unix_main.exe server_unix_main.exe - async/client_async_main.exe - async/server_async_main.exe lwt/client_main.exe lwt/server_main.exe lwt/link_test_main.exe diff --git a/ocaml/message-switch/core_test/lock_test_async.ml b/ocaml/message-switch/core_test/lock_test_async.ml deleted file mode 100644 index 85cde8eaecb..00000000000 --- a/ocaml/message-switch/core_test/lock_test_async.ml +++ /dev/null @@ -1,13 +0,0 @@ -open Core -open Async -open Message_switch_async - -let ( >>= ) = Deferred.( >>= ) - -let test_async_lock () = Protocol_async.Mtest.mutex_provides_mutal_exclusion () - -let () = - don't_wait_for - (test_async_lock () >>= fun () -> shutdown 0 ; Deferred.return ()) - -let () = never_returns (Scheduler.go ()) diff --git a/ocaml/xen-api-client/async/dune b/ocaml/xen-api-client/async/dune deleted file mode 100644 index a3ed8b645b7..00000000000 --- a/ocaml/xen-api-client/async/dune +++ /dev/null @@ -1,25 +0,0 @@ -(library - (name xen_api_client_async) - (public_name xen-api-client-async) - (libraries - async - async_kernel - async_unix - base - cohttp - core - core_unix - core_unix.time_unix - core_kernel - rpclib.core - rpclib.json - rpclib.xml - uri - xapi-client - xapi-consts - xen-api-client - xmlm - ) - (wrapped false) -) - diff --git a/ocaml/xen-api-client/async/xen_api_async_unix.ml b/ocaml/xen-api-client/async/xen_api_async_unix.ml deleted file mode 100644 index 3e8092c1faf..00000000000 --- a/ocaml/xen-api-client/async/xen_api_async_unix.ml +++ /dev/null @@ -1,134 +0,0 @@ -(* - * Copyright (c) 2012 Anil Madhavapeddy - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * - *) -open Core -open Async -open Xen_api - -module IO = struct - type 'a t = 'a Deferred.t - - let ( >>= ) = Deferred.( >>= ) - - (* let (>>) m n = m >>= fun _ -> n *) - let return = Deferred.return - - type ic = (unit -> unit Deferred.t) * Reader.t - - type oc = (unit -> unit Deferred.t) * Writer.t - - type conn = unit - - let read_line (_, ic) = - Reader.read_line ic >>| function `Ok s -> Some s | `Eof -> None - - let read (_, ic) len = - let buf = Bytes.create len in - Reader.read ic ~len buf >>| function - | `Ok len' -> - let content = Bytes.sub buf ~pos:0 ~len:len' in - Bytes.to_string content - | `Eof -> - "" - - (* let read_exactly (_, ic) len = - let buf = String.create len in - Reader.really_read ic ~pos:0 ~len buf >>= - function - |`Ok -> return (Some buf) - |`Eof _ -> return None *) - - let write (_, oc) buf = Writer.write oc buf ; return () - - (* let write_line (_, oc) buf = - Writer.write oc buf; - Writer.write oc "\r\n"; - return () *) - - let flush (_, oc) = Async.Writer.flushed oc - - let close ((close1, _), (close2, _)) = close1 () >>= fun () -> close2 () - - let open_connection uri = - match Uri.scheme uri with - | Some "http" -> ( - let port = match Uri.port uri with None -> 80 | Some port -> port in - match Uri.host uri with - | Some host -> - let endp = Host_and_port.create ~host ~port in - Tcp.connect (Tcp.Where_to_connect.of_host_and_port endp) - >>| fun (_, ic, oc) -> - Ok - ( ((fun () -> Reader.close ic), ic) - , ((fun () -> Writer.close oc), oc) - ) - | None -> - return (Error (Failed_to_resolve_hostname "")) - ) - | Some x -> - return (Error (Unsupported_scheme x)) - | None -> - return (Error (Unsupported_scheme "")) - - let sleep s = after (sec s) - - let gettimeofday = Unix.gettimeofday -end - -module M = Make (IO) - -let exn_to_string = function - | Api_errors.Server_error (code, params) -> - Printf.sprintf "%s %s" code (String.concat ~sep:" " params) - | e -> - Printf.sprintf "Caught unexpected exception: %s" (Exn.to_string e) - -let do_it uri string = - let uri = Uri.of_string uri in - let connection = M.make uri in - let ( >>= ) = Deferred.( >>= ) in - Monitor.protect - (fun () -> - M.rpc connection string >>= function - | Ok x -> - return x - | Error e -> - eprintf "Caught: %s\n%!" (exn_to_string e) ; - Exn.reraise e "connection error" - ) - ~finally:(fun () -> M.disconnect connection) - -(* TODO: modify do_it to accept the timeout and remove the warnings *) - -[@@@ocaml.warning "-27"] - -let make ?(timeout = 30.) uri call = - let req = Xmlrpc.string_of_call call in - do_it uri req >>| Xmlrpc.response_of_string - -[@@@ocaml.warning "-27"] - -let make_json ?(timeout = 30.) uri call = - let req = Jsonrpc.string_of_call call in - do_it uri req >>| Jsonrpc.response_of_string - -module Client = Client.ClientF (struct - include Deferred - - let bind a f = bind a ~f -end) - -include Client diff --git a/ocaml/xen-api-client/async/xen_api_async_unix.mli b/ocaml/xen-api-client/async/xen_api_async_unix.mli deleted file mode 100644 index 4d8ac0a2886..00000000000 --- a/ocaml/xen-api-client/async/xen_api_async_unix.mli +++ /dev/null @@ -1,28 +0,0 @@ -(* - * Copyright (C) 2012 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -val make : ?timeout:float -> string -> Rpc.call -> Rpc.response Async.Deferred.t -(** [make ?timeout uri] returns an 'rpc' function which can be - passed to Client.* functions *) - -val make_json : - ?timeout:float -> string -> Rpc.call -> Rpc.response Async.Deferred.t -(** [make_json ?timeout uri] returns an 'rpc' function which can be - passed to Client.* functions *) - -include module type of Client.ClientF (struct - include Async.Deferred - - let bind a f = bind a ~f -end) diff --git a/ocaml/xen-api-client/async_examples/dune b/ocaml/xen-api-client/async_examples/dune deleted file mode 100644 index 7d39e42c902..00000000000 --- a/ocaml/xen-api-client/async_examples/dune +++ /dev/null @@ -1,48 +0,0 @@ -(executable - (modes exe) - (name list_vms) - (modules list_vms) - (libraries - async - async_unix - base - base.caml - core - core_kernel - - xapi-consts - xapi-types - xen-api-client - xen-api-client-async - ) -) - -(executable - (modes exe) - (name event_test) - (modules event_test) - (libraries - async - async_unix - base - base.caml - core - core_kernel - rpclib.json - sexplib0 - xapi-consts - xapi-types - xen-api-client - xen-api-client-async - ) -) - -(alias - (name examples) - (deps - list_vms.exe - event_test.exe - ) - (package xen-api-client-async) -) - diff --git a/ocaml/xen-api-client/async_examples/event_test.ml b/ocaml/xen-api-client/async_examples/event_test.ml deleted file mode 100644 index 7107a8bda8f..00000000000 --- a/ocaml/xen-api-client/async_examples/event_test.ml +++ /dev/null @@ -1,175 +0,0 @@ -(* - * Copyright (C) 2012-2014 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -open Core -open Async -open Xen_api_async_unix - -let uri = ref "http://127.0.0.1/" - -let username = ref "root" - -let password = ref "password" - -let enable_debug = ref false - -let debug fmt = - Printf.ksprintf - (fun txt -> - if !enable_debug then - eprintf "%s\n%!" txt - ) - fmt - -let error fmt = Printf.ksprintf (fun txt -> eprintf "Error: %s\n%!" txt) fmt - -let info fmt = Printf.ksprintf (fun txt -> eprintf "%s\n%!" 
txt) fmt - -let watch_events rpc session_id = - let open Event_types in - let module StringMap = Map.Make (String) in - let root = ref StringMap.empty in - - let update map ev = - (* type-specific table *) - let ty = - match StringMap.find map ev.ty with - | None -> - StringMap.empty - | Some x -> - x - in - let ty = - match ev.op with - | `add | `_mod -> ( - match ev.snapshot with - | None -> - error "Event contained no snapshot" ; - ty - | Some s -> - StringMap.update ty ev.reference ~f:(fun _ -> s) - ) - | `del -> - StringMap.remove ty ev.reference - in - if StringMap.is_empty ty then - StringMap.remove map ev.ty - else - StringMap.update map ev.ty ~f:(fun _ -> ty) - in - - let compare () = - let open Event_types in - Event.from ~rpc ~session_id ~classes:["*"] ~token:"" ~timeout:0. - >>= fun rpc -> - let e = event_from_of_rpc rpc in - if List.is_empty e.events then error "Empty list of events" ; - let current = List.fold_left ~init:StringMap.empty ~f:update e.events in - Sequence.iter - ~f:(fun (key, diff) -> - match (key, diff) with - | key, `Left _ -> - error "Replica has extra table: %s" key - | key, `Right _ -> - error "Replica has missing table: %s" key - | _, `Unequal (_, _) -> - () - ) - (StringMap.symmetric_diff !root current ~data_equal:(fun _ _ -> true)) ; - List.iter - ~f:(fun key -> - match StringMap.find !root key with - | None -> - error "Table missing in replica: %s" key - | Some root_table -> - let current_table = StringMap.find_exn current key in - Sequence.iter - ~f:(fun (key, diff) -> - match (key, diff) with - | r, `Left rpc -> - error "Replica has extra object: %s: %s" r - (Jsonrpc.to_string rpc) - | r, `Right rpc -> - error "Replica has missing object: %s: %s" r - (Jsonrpc.to_string rpc) - | r, `Unequal (rpc1, rpc2) -> - error "Replica has out-of-sync object: %s: %s <> %s" r - (Jsonrpc.to_string rpc1) (Jsonrpc.to_string rpc2) - ) - (StringMap.symmetric_diff root_table current_table - ~data_equal:(fun a b -> Base.Poly.equal a b - ) - ) - ) - (StringMap.keys current) ; - return () - in - - let rec loop token = - Event.from ~rpc ~session_id ~classes:["*"] ~token ~timeout:30. 
- >>= fun rpc -> - debug "received event: %s" (Jsonrpc.to_string rpc) ; - let e = event_from_of_rpc rpc in - List.iter ~f:(fun ev -> root := update !root ev) e.events ; - compare () >>= fun () -> - info "object counts: %s" - (String.concat ~sep:", " - (List.map - ~f:(fun key -> - Printf.sprintf "%s (%d)" key - (StringMap.length (StringMap.find_exn !root key)) - ) - (StringMap.keys !root) - ) - ) ; - loop e.token - in - loop "" - -let main () = - let rpc = make !uri in - Session.login_with_password ~rpc ~uname:!username ~pwd:!password - ~version:"1.0" ~originator:"event_test" - >>= fun session_id -> - let a = watch_events rpc session_id in - let b = watch_events rpc session_id in - a >>= fun () -> - b >>= fun () -> - Session.logout ~rpc ~session_id >>= fun () -> shutdown 0 ; return () - -let _ = - Arg.parse - [ - ( "-uri" - , Arg.Set_string uri - , Printf.sprintf "URI of server to connect to (default %s)" !uri - ) - ; ( "-u" - , Arg.Set_string username - , Printf.sprintf "Username to log in with (default %s)" !username - ) - ; ( "-pw" - , Arg.Set_string password - , Printf.sprintf "Password to log in with (default %s)" !password - ) - ; ( "-debug" - , Arg.Set enable_debug - , Printf.sprintf "Enable debug logging (default %b)" !enable_debug - ) - ] - (fun x -> eprintf "Ignoring argument: %s\n" x) - "Simple example which tracks the server state via events" ; - - let (_ : unit Deferred.t) = main () in - never_returns (Scheduler.go ()) diff --git a/ocaml/xen-api-client/async_examples/list_vms.ml b/ocaml/xen-api-client/async_examples/list_vms.ml deleted file mode 100644 index 6aac0feb527..00000000000 --- a/ocaml/xen-api-client/async_examples/list_vms.ml +++ /dev/null @@ -1,56 +0,0 @@ -(* - * Copyright (C) 2012 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -open Core -open Async -open Xen_api_async_unix - -let uri = ref "http://127.0.0.1/" - -let username = ref "root" - -let password = ref "password" - -let main () = - let rpc = make !uri in - Session.login_with_password ~rpc ~uname:!username ~pwd:!password - ~version:"1.0" ~originator:"list_vms" - >>= fun session_id -> - VM.get_all_records ~rpc ~session_id >>= fun vms -> - List.iter - ~f:(fun (_, vm_rec) -> printf "VM %s\n%!" 
vm_rec.API.vM_name_label) - vms ; - Session.logout ~rpc ~session_id >>= fun () -> shutdown 0 ; return () - -let _ = - Arg.parse - [ - ( "-uri" - , Arg.Set_string uri - , Printf.sprintf "URI of server to connect to (default %s)" !uri - ) - ; ( "-u" - , Arg.Set_string username - , Printf.sprintf "Username to log in with (default %s)" !username - ) - ; ( "-pw" - , Arg.Set_string password - , Printf.sprintf "Password to log in with (default %s)" !password - ) - ] - (fun x -> eprintf "Ignoring argument: %s\n" x) - "Simple example which lists VMs found on a pool" ; - - let (_ : unit Deferred.t) = main () in - never_returns (Scheduler.go ()) diff --git a/quality-gate.sh b/quality-gate.sh index 9c3d3c2b5f8..01e2b301148 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=508 + N=506 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) diff --git a/xapi-idl.opam b/xapi-idl.opam index c1fff027077..20c9ea0f1af 100644 --- a/xapi-idl.opam +++ b/xapi-idl.opam @@ -22,7 +22,7 @@ depends: [ "ipaddr" "logs" "lwt" {>= "5.0.0"} - "message-switch-async" {with-test} + "message-switch-lwt" {with-test} "message-switch-core" "message-switch-unix" "mtime" diff --git a/xapi-idl.opam.template b/xapi-idl.opam.template index beea3845af6..5f6105ba5da 100644 --- a/xapi-idl.opam.template +++ b/xapi-idl.opam.template @@ -20,7 +20,7 @@ depends: [ "ipaddr" "logs" "lwt" {>= "5.0.0"} - "message-switch-async" {with-test} + "message-switch-lwt" {with-test} "message-switch-core" "message-switch-unix" "mtime" diff --git a/xapi-storage-script.opam b/xapi-storage-script.opam index a8df41ef405..0a974584ac2 100644 --- a/xapi-storage-script.opam +++ b/xapi-storage-script.opam @@ -14,23 +14,19 @@ depends: [ "ocaml" "dune" {>= "3.15"} "conf-python-3" {with-test} - "xapi-idl" {>= "0.10.0"} - "xapi-storage" - "async" {>= "v0.9.0"} - "async_inotify" - "async_unix" {>= "112.24.00"} - "core" + "base" + "inotify" + "lwt" + "message-switch-lwt" "message-switch-unix" - "message-switch-async" - "rpclib" - "rpclib-async" "ppx_deriving_rpc" "ppx_sexp_conv" + "rpclib" + "rpclib-lwt" + "sexplib0" + "xapi-idl" {>= "0.10.0"} "xapi-stdext-date" -] -# python 2.7 is not enough to ensure the availability of 'python' in these -depexts: [ - ["python"] {os-family = "debian" & with-test} + "xapi-storage" ] synopsis: "A directory full of scripts can be a Xapi storage implementation" description: """ diff --git a/xapi-storage-script.opam.template b/xapi-storage-script.opam.template index b40cc0880b5..d569fda47b8 100644 --- a/xapi-storage-script.opam.template +++ b/xapi-storage-script.opam.template @@ -12,23 +12,19 @@ depends: [ "ocaml" "dune" {>= "3.15"} "conf-python-3" {with-test} - "xapi-idl" {>= "0.10.0"} - "xapi-storage" - "async" {>= "v0.9.0"} - "async_inotify" - "async_unix" {>= "112.24.00"} - "core" + "base" + "inotify" + "lwt" + "message-switch-lwt" "message-switch-unix" - "message-switch-async" - "rpclib" - "rpclib-async" "ppx_deriving_rpc" "ppx_sexp_conv" + "rpclib" + "rpclib-lwt" + "sexplib0" + "xapi-idl" {>= "0.10.0"} "xapi-stdext-date" -] -# python 2.7 is not enough to ensure the availability of 'python' in these -depexts: [ - ["python"] 
{os-family = "debian" & with-test} + "xapi-storage" ] synopsis: "A directory full of scripts can be a Xapi storage implementation" description: """ diff --git a/xen-api-client-async.opam b/xen-api-client-async.opam deleted file mode 100644 index c283cb6d1e8..00000000000 --- a/xen-api-client-async.opam +++ /dev/null @@ -1,38 +0,0 @@ -# This file is generated by dune, edit dune-project instead - -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -authors: [ "David Scott" "Anil Madhavapeddy" "Jerome Maloberti" "John Else" "Jon Ludlam" "Thomas Sanders" "Mike McClurg" ] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -tags: [ - "org:xapi-project" -] -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "async" {>= "v0.9.0"} - "async_kernel" - "async_unix" - "base" - "base-threads" - "cohttp" {>= "0.22.0"} - "core" - "core_kernel" - "core_unix" - "rpclib" - "uri" - "xen-api-client" - "xmlm" -] -synopsis: - "Xen-API client library for remotely-controlling a xapi host" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} diff --git a/xen-api-client-async.opam.template b/xen-api-client-async.opam.template deleted file mode 100644 index 8224d441c1d..00000000000 --- a/xen-api-client-async.opam.template +++ /dev/null @@ -1,36 +0,0 @@ -opam-version: "2.0" -maintainer: "xen-api@lists.xen.org" -authors: [ "David Scott" "Anil Madhavapeddy" "Jerome Maloberti" "John Else" "Jon Ludlam" "Thomas Sanders" "Mike McClurg" ] -license: "LGPL-2.1-only WITH OCaml-LGPL-linking-exception" -homepage: "https://github.com/xapi-project/xen-api" -dev-repo: "git+https://github.com/xapi-project/xen-api.git" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -tags: [ - "org:xapi-project" -] -build: [ - ["dune" "build" "-p" name "-j" jobs] - ["dune" "runtest" "-p" name "-j" jobs] {with-test} -] -depends: [ - "ocaml" - "dune" {>= "3.15"} - "async" {>= "v0.9.0"} - "async_kernel" - "async_unix" - "base" - "base-threads" - "cohttp" {>= "0.22.0"} - "core" - "core_kernel" - "core_unix" - "rpclib" - "uri" - "xen-api-client" - "xmlm" -] -synopsis: - "Xen-API client library for remotely-controlling a xapi host" -url { - src: "https://github.com/xapi-project/xen-api/archive/master.tar.gz" -} From 94b60b269d795a3cbc48aef7914072ff6beda01f Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 22 Mar 2024 16:48:47 +0000 Subject: [PATCH 075/141] xapi-storage-script: make conversion functions testable Separate the new functions into a private library, which can be tested Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/dune | 20 +- ocaml/xapi-storage-script/lib.ml | 273 ++++++++++++++++++ ocaml/xapi-storage-script/lib.mli | 99 +++++++ ocaml/xapi-storage-script/main.ml | 260 +---------------- .../xapi_storage_script_types.ml | 25 -- quality-gate.sh | 2 +- 6 files changed, 391 insertions(+), 288 deletions(-) create mode 100644 ocaml/xapi-storage-script/lib.ml create mode 100644 ocaml/xapi-storage-script/lib.mli delete mode 100644 ocaml/xapi-storage-script/xapi_storage_script_types.ml diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index 9670539ee30..0f5c5ebb7de 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -1,11 +1,24 @@ +(library + (name 
private) + (modules lib) + (libraries + fmt + inotify + inotify.lwt + lwt + lwt.unix + rpclib.core + ) + (preprocess (pps ppx_deriving_rpc)) + ) + (executable (name main) + (modules main) (libraries base fmt - inotify - inotify.lwt logs logs.lwt lwt @@ -13,6 +26,7 @@ message-switch-lwt message-switch-unix ppx_deriving.runtime + private result rpclib.core rpclib.json @@ -32,7 +46,7 @@ xapi-stdext-date xapi-storage ) - (preprocess (pps ppx_deriving_rpc ppx_sexp_conv)) + (preprocess (pps ppx_sexp_conv)) ) (install diff --git a/ocaml/xapi-storage-script/lib.ml b/ocaml/xapi-storage-script/lib.ml new file mode 100644 index 00000000000..732a9f986cc --- /dev/null +++ b/ocaml/xapi-storage-script/lib.ml @@ -0,0 +1,273 @@ +(* Copyright (C) Cloud Software Group Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. +*) + +module StringMap = Map.Make (String) + +module Types = struct + type backtrace = { + error: string + ; (* Python json.dumps and rpclib are not very friendly *) + files: string list + ; lines: int list + } + [@@deriving rpc] + + (* This matches xapi.py:exception *) + type error = {code: string; params: string list; backtrace: backtrace} + [@@deriving rpc] +end + +let ( >>= ) = Lwt.bind + +let return = Lwt_result.return + +let fail = Lwt_result.fail + +let ( // ) = Filename.concat + +module Sys = struct + type file = Regular | Directory | Other | Missing | Unknown + + let file_kind ~follow_symlinks path = + Lwt.try_bind + (fun () -> + ( if follow_symlinks then + Lwt_unix.LargeFile.stat + else + Lwt_unix.LargeFile.lstat + ) + path + ) + (function + | s -> ( + match s.Unix.LargeFile.st_kind with + | Unix.S_REG -> + Lwt.return Regular + | Unix.S_DIR -> + Lwt.return Directory + | _ -> + Lwt.return Other + ) + ) + (function + | Unix.Unix_error (Unix.ENOENT, _, _) -> + Lwt.return Missing + | Unix.Unix_error ((Unix.EACCES | Unix.ELOOP), _, _) -> + Lwt.return Unknown + | e -> + Lwt.fail e + ) + + let access path modes = + Lwt.try_bind + (fun () -> Lwt_unix.access path modes) + Lwt_result.return + (fun exn -> fail (`not_executable (path, exn))) + + let assert_is_executable path = + file_kind ~follow_symlinks:true path >>= function + | Directory | Other | Missing | Unknown -> + fail (`missing path) + | Regular -> ( + access path [Unix.X_OK] >>= function + | Error exn -> + fail exn + | Ok () -> + return () + ) + + let read_file_contents path = + Lwt_io.(with_file ~mode:input ~flags:[O_RDONLY] ~perm:0o000 path read) + + let save ~contents path = + Lwt_io.(with_file ~mode:output path (Fun.flip write contents)) + + let readdir path = + path |> Lwt_unix.files_of_directory |> Lwt_stream.to_list >>= fun listing -> + List.filter (function "." | ".." 
-> false | _ -> true) listing + |> Lwt.return + + let mkdir_p ?(perm = 0o755) path = + let rec loop acc path = + let create_dir () = Lwt_unix.mkdir path perm in + let create_subdirs () = Lwt_list.iter_s (fun f -> f ()) acc in + Lwt.try_bind create_dir create_subdirs (function + | Unix.(Unix_error (EEXIST, _, _)) -> + (* create directories, parents first *) + create_subdirs () + | Unix.(Unix_error (ENOENT, _, _)) -> + let parent = Filename.dirname path in + loop (create_dir :: acc) parent + | exn -> + let msg = + Printf.sprintf {|Could not create directory "%s" because: %s|} + path (Printexc.to_string exn) + in + Lwt.fail (Failure msg) + ) + in + loop [] path +end + +module Signal = struct + type t = int + + let to_string s = Fmt.(str "%a" Dump.signal s) +end + +module Process = struct + module Output = struct + type exit_or_signal = Exit_non_zero of int | Signal of Signal.t + + type t = { + exit_status: (unit, exit_or_signal) Result.t + ; stdout: string + ; stderr: string + } + + let exit_or_signal_of_unix = function + | Unix.WEXITED 0 -> + Ok () + | WEXITED n -> + Error (Exit_non_zero n) + | WSIGNALED n -> + Error (Signal n) + | WSTOPPED n -> + Error (Signal n) + end + + let create ~env ~prog ~args = + let args = Array.of_list (prog :: args) in + let cmd = (prog, args) in + + let env = + Unix.environment () + |> Array.to_seq + |> Seq.map (fun kv -> + let k, v = Scanf.sscanf kv "%s@=%s" (fun k v -> (k, v)) in + (k, v) + ) + |> StringMap.of_seq + |> StringMap.add_seq (List.to_seq env) + |> StringMap.to_seq + |> Seq.map (fun (k, v) -> Printf.sprintf "%s=%s" k v) + |> Array.of_seq + in + + Lwt_process.open_process_full ~env cmd + + let close chan () = Lwt_io.close chan + + let send chan data = + Lwt.finalize (fun () -> Lwt_io.write chan data) (close chan) + + let receive chan = Lwt.finalize (fun () -> Lwt_io.read chan) (close chan) + + let run ~env ~prog ~args ~input = + let p = create ~env ~prog ~args in + let sender = send p#stdin input in + let receiver_out = receive p#stdout in + let receiver_err = receive p#stderr in + Lwt.catch + (fun () -> + let receiver = Lwt.both receiver_out receiver_err in + Lwt.both sender receiver >>= fun ((), (stdout, stderr)) -> + p#status >>= fun status -> + let exit_status = Output.exit_or_signal_of_unix status in + Lwt.return {Output.exit_status; stdout; stderr} + ) + (function + | Lwt.Canceled as exn -> + Lwt.cancel receiver_out ; Lwt.cancel receiver_err ; Lwt.fail exn + | exn -> + Lwt.fail exn + ) +end + +module FileWatcher = struct + type move = Away of string | Into of string + + type event = + | Created of string + | Unlinked of string + | Modified of string + | Moved of move + | Queue_overflow (** Consumer is not reading fast enough, events missed *) + + let create path = + Lwt_inotify.create () >>= fun desc -> + let watches = Hashtbl.create 32 in + let selectors = + Inotify.[S_Close; S_Create; S_Delete; S_Delete_self; S_Modify; S_Move] + in + Lwt_inotify.add_watch desc path selectors >>= fun watch -> + (* Deduplicate the watches by removing the previous one from inotify and + replacing it in the table *) + let maybe_remove = + if Hashtbl.mem watches watch then + Lwt_inotify.rm_watch desc watch + else + Lwt.return_unit + in + maybe_remove >>= fun () -> + Hashtbl.replace watches watch path ; + Lwt.return (watches, desc) + + let read (watches, desc) = + Lwt_inotify.read desc >>= fun (wd, mask, _cookie, filename) -> + let overflowed = + Inotify.int_of_watch wd = -1 && mask = [Inotify.Q_overflow] + in + let watch_path = Hashtbl.find_opt watches wd in + 
match (overflowed, watch_path) with + | true, _ -> + Lwt.return [Queue_overflow] + | _, None -> + Lwt.return [] + | _, Some base_path -> + let path = + match filename with + | None -> + base_path + | Some name -> + base_path // name + in + + List.filter_map + (function + | Inotify.Access + | Attrib + | Isdir + | Open + | Close_nowrite + | Ignored + | Unmount -> + None + | Create -> + Some (Created path) + | Delete | Delete_self -> + Some (Unlinked path) + | Close_write | Modify | Move_self -> + Some (Modified path) + | Moved_from -> + Some (Moved (Away path)) + | Moved_to -> + Some (Moved (Into path)) + | Q_overflow -> + Some Queue_overflow + ) + mask + |> Lwt.return +end + +module Clock = struct let after ~seconds = Lwt_unix.sleep seconds end diff --git a/ocaml/xapi-storage-script/lib.mli b/ocaml/xapi-storage-script/lib.mli new file mode 100644 index 00000000000..1dd67312bad --- /dev/null +++ b/ocaml/xapi-storage-script/lib.mli @@ -0,0 +1,99 @@ +(* Copyright (C) Cloud Software Group Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. +*) + +module Types : sig + type backtrace = {error: string; files: string list; lines: int list} + + val rpc_of_backtrace : backtrace -> Rpc.t + + val backtrace_of_rpc : Rpc.t -> backtrace + + type error = {code: string; params: string list; backtrace: backtrace} + + val rpc_of_error : error -> Rpc.t + + val error_of_rpc : Rpc.t -> error +end + +module Sys : sig + type file = Regular | Directory | Other | Missing | Unknown + + val file_kind : follow_symlinks:bool -> string -> file Lwt.t + + val access : + string + -> Unix.access_permission list + -> (unit, [> `not_executable of string * exn]) result Lwt.t + + val assert_is_executable : + string + -> (unit, [> `missing of string | `not_executable of string * exn]) result + Lwt.t + + val read_file_contents : string -> string Lwt.t + + val save : contents:string -> string -> unit Lwt.t + + val readdir : string -> string list Lwt.t + + val mkdir_p : ?perm:int -> string -> unit Lwt.t +end + +module Signal : sig + type t + + val to_string : t -> string +end + +module Process : sig + module Output : sig + type exit_or_signal = Exit_non_zero of int | Signal of Signal.t + + type t = { + exit_status: (unit, exit_or_signal) result + ; stdout: string + ; stderr: string + } + end + + val run : + env:(string * string) list + -> prog:string + -> args:string list + -> input:string + -> Output.t Lwt.t + (** Runs a cli program, writes [input] into its stdin, then closing the fd, + and finally waits for the program to finish and returns the exit status, + its stdout and stderr. 
*) +end + +module FileWatcher : sig + type move = Away of string | Into of string + + type event = + | Created of string + | Unlinked of string + | Modified of string + | Moved of move + | Queue_overflow (** Consumer is not reading fast enough, events missed *) + + val create : + string -> ((Inotify.watch, string) Hashtbl.t * Lwt_inotify.t) Lwt.t + + val read : + (Inotify.watch, string) Hashtbl.t * Lwt_inotify.t -> event list Lwt.t +end + +module Clock : sig + val after : seconds:float -> unit Lwt.t +end diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 8c57a9dc759..3b295f277bf 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -12,12 +12,11 @@ * GNU Lesser General Public License for more details. *) module R = Rpc -module Types = Xapi_storage_script_types module Plugin_client = Xapi_storage.Plugin.Plugin (Rpc_lwt.GenClient ()) module Volume_client = Xapi_storage.Control.Volume (Rpc_lwt.GenClient ()) module Sr_client = Xapi_storage.Control.Sr (Rpc_lwt.GenClient ()) module Datapath_client = Xapi_storage.Data.Datapath (Rpc_lwt.GenClient ()) -module StringMap = Map.Make (String) +open Private.Lib let ( >>= ) = Lwt.bind @@ -42,263 +41,6 @@ module Deferred = struct let try_with f = Lwt.try_bind f return fail end -module Sys = struct - type file = Regular | Directory | Other | Missing | Unknown - - let file_kind ~follow_symlinks path = - Lwt.try_bind - (fun () -> - ( if follow_symlinks then - Lwt_unix.LargeFile.stat - else - Lwt_unix.LargeFile.lstat - ) - path - ) - (function - | s -> ( - match s.Unix.LargeFile.st_kind with - | Unix.S_REG -> - Lwt.return Regular - | Unix.S_DIR -> - Lwt.return Directory - | _ -> - Lwt.return Other - ) - ) - (function - | Unix.Unix_error (Unix.ENOENT, _, _) -> - Lwt.return Missing - | Unix.Unix_error ((Unix.EACCES | Unix.ELOOP), _, _) -> - Lwt.return Unknown - | e -> - Lwt.fail e - ) - - let access path modes = - Lwt.try_bind - (fun () -> Lwt_unix.access path modes) - return - (fun exn -> fail (`not_executable (path, exn))) - - let assert_is_executable path = - file_kind ~follow_symlinks:true path >>= function - | Directory | Other | Missing | Unknown -> - fail (`missing path) - | Regular -> ( - access path [Unix.X_OK] >>= function - | Error exn -> - fail exn - | Ok () -> - return () - ) - - let read_file_contents path = - Lwt_io.(with_file ~mode:input ~flags:[O_RDONLY] ~perm:0o000 path read) - - let save ~contents path = - Lwt_io.(with_file ~mode:output path (Fun.flip write contents)) - - let readdir path = - path |> Lwt_unix.files_of_directory |> Lwt_stream.to_list >>= fun listing -> - List.filter (function "." | ".." 
-> false | _ -> true) listing - |> Lwt.return - - let mkdir_p ?(perm = 0o755) path = - let rec loop acc path = - let create_dir () = Lwt_unix.mkdir path perm in - let create_subdirs () = Lwt_list.iter_s (fun f -> f ()) acc in - Lwt.try_bind create_dir create_subdirs (function - | Unix.(Unix_error (EEXIST, _, _)) -> - (* create directories, parents first *) - create_subdirs () - | Unix.(Unix_error (ENOENT, _, _)) -> - let parent = Filename.dirname path in - loop (create_dir :: acc) parent - | exn -> - let msg = - Printf.sprintf {|Could not create directory "%s" because: %s|} - path (Printexc.to_string exn) - in - Lwt.fail (Failure msg) - ) - in - loop [] path -end - -module Signal = struct - type t = int - - let to_string s = Fmt.(str "%a" Dump.signal s) -end - -module Process : sig - module Output : sig - type exit_or_signal = Exit_non_zero of int | Signal of Signal.t - - type t = { - exit_status: (unit, exit_or_signal) Result.t - ; stdout: string - ; stderr: string - } - end - - val run : - env:(string * string) list - -> prog:string - -> args:string list - -> input:string - -> Output.t Lwt.t - (** Runs a cli program prepeding [env] to its environment, writes [input] - into its stdin, then closing the fd, and finally waits for the program to - finish and returns the exit status, its stdout and stderr. *) -end = struct - module Output = struct - type exit_or_signal = Exit_non_zero of int | Signal of Signal.t - - type t = { - exit_status: (unit, exit_or_signal) Result.t - ; stdout: string - ; stderr: string - } - - let exit_or_signal_of_unix = function - | Unix.WEXITED 0 -> - Ok () - | WEXITED n -> - Error (Exit_non_zero n) - | WSIGNALED n -> - Error (Signal n) - | WSTOPPED n -> - Error (Signal n) - end - - let create ~env ~prog ~args = - let args = Array.of_list (prog :: args) in - let cmd = (prog, args) in - - let env = - Unix.environment () - |> Array.to_seq - |> Seq.map (fun kv -> - let k, v = Scanf.sscanf kv "%s@=%s" (fun k v -> (k, v)) in - (k, v) - ) - |> StringMap.of_seq - |> StringMap.add_seq (List.to_seq env) - |> StringMap.to_seq - |> Seq.map (fun (k, v) -> Printf.sprintf "%s=%s" k v) - |> Array.of_seq - in - - Lwt_process.open_process_full ~env cmd - - let close chan () = Lwt_io.close chan - - let send chan data = - Lwt.finalize (fun () -> Lwt_io.write chan data) (close chan) - - let receive chan = Lwt.finalize (fun () -> Lwt_io.read chan) (close chan) - - let run ~env ~prog ~args ~input = - let p = create ~env ~prog ~args in - let sender = send p#stdin input in - let receiver_out = receive p#stdout in - let receiver_err = receive p#stderr in - Lwt.catch - (fun () -> - let receiver = Lwt.both receiver_out receiver_err in - Lwt.both sender receiver >>= fun ((), (stdout, stderr)) -> - p#status >>= fun status -> - let exit_status = Output.exit_or_signal_of_unix status in - Lwt.return {Output.exit_status; stdout; stderr} - ) - (function - | Lwt.Canceled as exn -> - Lwt.cancel receiver_out ; Lwt.cancel receiver_err ; Lwt.fail exn - | exn -> - Lwt.fail exn - ) -end - -module FileWatcher = struct - type move = Away of string | Into of string - - type event = - | Created of string - | Unlinked of string - | Modified of string - | Moved of move - | Queue_overflow (** Consumer is not reading fast enough, events missed *) - - let create path = - Lwt_inotify.create () >>= fun desc -> - let watches = Hashtbl.create 32 in - let selectors = - Inotify.[S_Close; S_Create; S_Delete; S_Delete_self; S_Modify; S_Move] - in - Lwt_inotify.add_watch desc path selectors >>= fun watch -> - (* Deduplicate 
the watches by removing the previous one from inotify and - replacing it in the table *) - let maybe_remove = - if Hashtbl.mem watches watch then - Lwt_inotify.rm_watch desc watch - else - Lwt.return_unit - in - maybe_remove >>= fun () -> - Hashtbl.replace watches watch path ; - Lwt.return (watches, desc) - - let rec read (watches, desc) = - Lwt_inotify.read desc >>= fun (wd, mask, _cookie, filename) -> - let overflowed = - Inotify.int_of_watch wd = -1 && mask = [Inotify.Q_overflow] - in - let watch_path = Hashtbl.find_opt watches wd in - match (overflowed, watch_path) with - | true, _ -> - Lwt.return [Queue_overflow] - | _, None -> - Lwt.return [] - | _, Some base_path -> - let path = - match filename with - | None -> - base_path - | Some name -> - base_path // name - in - - List.filter_map - (function - | Inotify.Access - | Attrib - | Isdir - | Open - | Close_nowrite - | Ignored - | Unmount -> - None - | Create -> - Some (Created path) - | Delete | Delete_self -> - Some (Unlinked path) - | Close_write | Modify | Move_self -> - Some (Modified path) - | Moved_from -> - Some (Moved (Away path)) - | Moved_to -> - Some (Moved (Into path)) - | Q_overflow -> - Some Queue_overflow - ) - mask - |> Lwt.return -end - -module Clock = struct let after ~seconds = Lwt_unix.sleep seconds end - type config = {mutable use_observer: bool} let config = {use_observer= false} diff --git a/ocaml/xapi-storage-script/xapi_storage_script_types.ml b/ocaml/xapi-storage-script/xapi_storage_script_types.ml deleted file mode 100644 index 9b8d9456ccc..00000000000 --- a/ocaml/xapi-storage-script/xapi_storage_script_types.ml +++ /dev/null @@ -1,25 +0,0 @@ -(* - * Copyright (C) Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -type backtrace = { - error: string - ; (* Python json.dumps and rpclib are not very friendly *) - files: string list - ; lines: int list -} -[@@deriving rpc] - -(* This matches xapi.py:exception *) -type error = {code: string; params: string list; backtrace: backtrace} -[@@deriving rpc] diff --git a/quality-gate.sh b/quality-gate.sh index 01e2b301148..c3cbd196b0b 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=506 + N=505 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) From ad195e3a4e73bf3b2512f05e45474d8f3ba8caf6 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 1 Aug 2024 16:36:54 +0100 Subject: [PATCH 076/141] xapi-storage-script: remove complexity from the inotify watcher The number of actions for the plugin directories is reduced to two, reload all the directory's contents, or reload a file's contents. 
This means that when a file is modified is removed and readded to the plugins, and if the directory changed in any other way the whole directory is reloaded. The code also tries to reduce duplication in the plugin watchers as much as possible. Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/lib.ml | 26 ++----- ocaml/xapi-storage-script/lib.mli | 11 +-- ocaml/xapi-storage-script/main.ml | 118 +++++++++++------------------- 3 files changed, 51 insertions(+), 104 deletions(-) diff --git a/ocaml/xapi-storage-script/lib.ml b/ocaml/xapi-storage-script/lib.ml index 732a9f986cc..f9dba17040a 100644 --- a/ocaml/xapi-storage-script/lib.ml +++ b/ocaml/xapi-storage-script/lib.ml @@ -194,15 +194,8 @@ module Process = struct ) end -module FileWatcher = struct - type move = Away of string | Into of string - - type event = - | Created of string - | Unlinked of string - | Modified of string - | Moved of move - | Queue_overflow (** Consumer is not reading fast enough, events missed *) +module DirWatcher = struct + type event = Modified of string | Changed let create path = Lwt_inotify.create () >>= fun desc -> @@ -231,7 +224,7 @@ module FileWatcher = struct let watch_path = Hashtbl.find_opt watches wd in match (overflowed, watch_path) with | true, _ -> - Lwt.return [Queue_overflow] + Lwt.return [Changed] | _, None -> Lwt.return [] | _, Some base_path -> @@ -253,18 +246,11 @@ module FileWatcher = struct | Ignored | Unmount -> None - | Create -> - Some (Created path) - | Delete | Delete_self -> - Some (Unlinked path) | Close_write | Modify | Move_self -> Some (Modified path) - | Moved_from -> - Some (Moved (Away path)) - | Moved_to -> - Some (Moved (Into path)) - | Q_overflow -> - Some Queue_overflow + | Create | Delete | Delete_self | Moved_from | Moved_to | Q_overflow + -> + Some Changed ) mask |> Lwt.return diff --git a/ocaml/xapi-storage-script/lib.mli b/ocaml/xapi-storage-script/lib.mli index 1dd67312bad..960c449e339 100644 --- a/ocaml/xapi-storage-script/lib.mli +++ b/ocaml/xapi-storage-script/lib.mli @@ -77,15 +77,10 @@ module Process : sig its stdout and stderr. 
*) end -module FileWatcher : sig - type move = Away of string | Into of string - +module DirWatcher : sig type event = - | Created of string - | Unlinked of string - | Modified of string - | Moved of move - | Queue_overflow (** Consumer is not reading fast enough, events missed *) + | Modified of string (** File contents changed *) + | Changed (** Something in the directory changed, read anew *) val create : string -> ((Inotify.watch, string) Hashtbl.t * Lwt_inotify.t) Lwt.t diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 3b295f277bf..6ce7b996976 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1775,33 +1775,52 @@ let rec diff a b = (* default false due to bugs in SMAPIv3 plugins, once they are fixed this should be set to true *) let concurrent = ref false -type action_file = Create of string | Delete of string -type action_dir = Files of action_file list | Sync | Nothing +type reload = All | Files of string list | Nothing let actions_from events = List.fold_left (fun acc event -> match (event, acc) with - | FileWatcher.Queue_overflow, _ -> - Sync - | _, Sync -> - Sync - | (Moved (Away path) | Unlinked path), Nothing -> - Files [Delete path] - | (Moved (Away path) | Unlinked path), Files files -> - Files (Delete path :: files) - | (Moved (Into path) | Created path), Nothing -> - Files [Create path] - | (Moved (Into path) | Created path), Files files -> - Files (Create path :: files) - | Modified path, Nothing -> - Files [Create path; Delete path] + | DirWatcher.Modified path, Nothing -> + Files [path] | Modified path, Files files -> - Files (Create path :: Delete path :: files) + Files (path :: files) + | Changed, _ | _, All -> + All ) Nothing events +let reload_all root ~create ~destroy = + let* needed = Sys.readdir root in + let got_already = Base.Hashtbl.keys servers in + let* () = Lwt.join (List.map create (diff needed got_already)) in + Lwt.join (List.map destroy (diff got_already needed)) + +let reload_file ~create ~destroy path = + let name = Filename.basename path in + let* () = destroy name in + create name + +let reload root ~create ~destroy = function + | All -> + reload_all root ~create ~destroy + | Files files -> + Lwt_list.iter_p (reload_file ~create ~destroy) files + | Nothing -> + Lwt.return_unit + +let rec watch_loop pipe root ~create ~destroy = + let* () = + let* events = DirWatcher.read pipe in + reload root ~create ~destroy (actions_from events) + in + watch_loop pipe root ~create ~destroy + +let watch_plugins ~pipe ~root ~create ~destroy = + reload_all root ~create ~destroy >>= fun () -> + watch_loop pipe root ~create ~destroy + let watch_volume_plugins ~volume_root ~switch_path ~pipe () = let create volume_plugin_name = if Base.Hashtbl.mem servers volume_plugin_name then @@ -1831,65 +1850,12 @@ let watch_volume_plugins ~volume_root ~switch_path ~pipe () = | None -> Lwt.return_unit in - let sync () = - Sys.readdir volume_root >>= fun needed -> - let got_already : string list = Base.Hashtbl.keys servers in - Lwt.join (List.map create (diff needed got_already)) >>= fun () -> - Lwt.join (List.map destroy (diff got_already needed)) - in - sync () >>= fun () -> - let resolve_file = function - | Create path -> - create (Filename.basename path) - | Delete path -> - destroy (Filename.basename path) - in - let resolve = function - | Sync -> - sync () - | Nothing -> - Lwt.return_unit - | Files files -> - Lwt_list.iter_s resolve_file (List.rev files) - in - let rec loop () = - (FileWatcher.read 
pipe >>= fun events -> resolve (actions_from events)) - >>= fun () -> loop () - in - loop () + watch_plugins ~pipe ~root:volume_root ~create ~destroy let watch_datapath_plugins ~datapath_root ~pipe () = - let sync () = - Sys.readdir datapath_root >>= fun needed -> - let got_already : string list = Base.Hashtbl.keys servers in - Lwt.join - (List.map - (Datapath_plugins.register ~datapath_root) - (diff needed got_already) - ) - >>= fun () -> - Lwt.join (List.map Datapath_plugins.unregister (diff got_already needed)) - in - sync () >>= fun () -> - let resolve_file = function - | Create path -> - Datapath_plugins.register ~datapath_root (Filename.basename path) - | Delete path -> - Datapath_plugins.unregister (Filename.basename path) - in - let resolve = function - | Sync -> - sync () - | Nothing -> - Lwt.return_unit - | Files files -> - Lwt_list.iter_s resolve_file (List.rev files) - in - let rec loop () = - (FileWatcher.read pipe >>= fun events -> resolve (actions_from events)) - >>= fun () -> loop () - in - loop () + let create = Datapath_plugins.register ~datapath_root in + let destroy = Datapath_plugins.unregister in + watch_plugins ~pipe ~root:datapath_root ~create ~destroy let self_test_plugin ~root_dir plugin = let volume_script_dir = Filename.(concat (concat root_dir "volume") plugin) in @@ -1971,9 +1937,9 @@ let self_test ~root_dir = let main ~root_dir ~state_path ~switch_path = Attached_SRs.reload state_path >>= fun () -> let datapath_root = root_dir // "datapath" in - FileWatcher.create datapath_root >>= fun datapath -> + DirWatcher.create datapath_root >>= fun datapath -> let volume_root = root_dir // "volume" in - FileWatcher.create volume_root >>= fun volume -> + DirWatcher.create volume_root >>= fun volume -> let rec retry_loop ((name, promise) as thread) () = Deferred.try_with promise >>= function | Ok () -> From ddc126d9862a70bbc484ac8f3ef379c4980ffabf Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 1 Aug 2024 17:29:40 +0100 Subject: [PATCH 077/141] xapi-storage-script: wait .5 seconds before reading from inotify This allows for several events to accumulate between events to avoid doing too many reloads while files are being moved about, while still being responsive enough. 
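As a rough sketch only (not the code in this patch), the idea is to sleep before each
read so that a burst of inotify events is coalesced into a single reload; here
`read_events` and `reload` are placeholders for DirWatcher.read and the plugin
reload logic:

```
(* Sketch: debounce a watch loop by sleeping before each read.
   [read_events] and [reload] are placeholders, not functions from
   this series. *)
let rec watch_loop read_events reload =
  let ( let* ) = Lwt.bind in
  (* let events accumulate for half a second, then handle them in one go *)
  let* () = Lwt_unix.sleep 0.5 in
  let* events = read_events () in
  let* () = reload events in
  watch_loop read_events reload
```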
Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/main.ml | 1 + 1 file changed, 1 insertion(+) diff --git a/ocaml/xapi-storage-script/main.ml b/ocaml/xapi-storage-script/main.ml index 6ce7b996976..96c68e73a82 100644 --- a/ocaml/xapi-storage-script/main.ml +++ b/ocaml/xapi-storage-script/main.ml @@ -1811,6 +1811,7 @@ let reload root ~create ~destroy = function Lwt.return_unit let rec watch_loop pipe root ~create ~destroy = + let* () = Lwt_unix.sleep 0.5 in let* () = let* events = DirWatcher.read pipe in reload root ~create ~destroy (actions_from events) From 6b835a1637d2d4f5e1c825967c6fe2e41358124b Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 17 May 2024 17:20:16 +0100 Subject: [PATCH 078/141] xapi-storage-script: test Sys module Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/dune | 6 ++ ocaml/xapi-storage-script/lib.mli | 4 + ocaml/xapi-storage-script/test_lib.ml | 143 +++++++++++++++++++++++++ ocaml/xapi-storage-script/test_lib.mli | 0 4 files changed, 153 insertions(+) create mode 100644 ocaml/xapi-storage-script/test_lib.ml create mode 100644 ocaml/xapi-storage-script/test_lib.mli diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index 0f5c5ebb7de..06e912ee9bb 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -12,6 +12,12 @@ (preprocess (pps ppx_deriving_rpc)) ) +(test + (name test_lib) + (modules test_lib) + (libraries alcotest alcotest-lwt lwt fmt private) + ) + (executable (name main) (modules main) diff --git a/ocaml/xapi-storage-script/lib.mli b/ocaml/xapi-storage-script/lib.mli index 960c449e339..a55c4b81fbc 100644 --- a/ocaml/xapi-storage-script/lib.mli +++ b/ocaml/xapi-storage-script/lib.mli @@ -39,6 +39,10 @@ module Sys : sig string -> (unit, [> `missing of string | `not_executable of string * exn]) result Lwt.t + (** [assert_is_executable path] returns [Ok ()] when [path] is an executable + regular file, [Error `not_executable] when the file is a non-executable + regular file, and [Error `missing] otherwise. The [Errors] return the + queried path as a string. *) val read_file_contents : string -> string Lwt.t diff --git a/ocaml/xapi-storage-script/test_lib.ml b/ocaml/xapi-storage-script/test_lib.ml new file mode 100644 index 00000000000..e016d1368a4 --- /dev/null +++ b/ocaml/xapi-storage-script/test_lib.ml @@ -0,0 +1,143 @@ +(* Copyright (C) Cloud Software Group Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+*) + +module Sys = Private.Lib.Sys +module Signal = Private.Lib.Signal +module Process = Private.Lib.Process + +let ( let* ) = Lwt.bind + +let test_content_rountrip = + let test () = + let contents = "yes" in + let path = Filename.temp_file "" "" in + let* () = Sys.save ~contents path in + let* result = Sys.read_file_contents path in + Alcotest.(check string) "Write and read roundtrip" contents result ; + Lwt.return () + in + ("Write and read file", `Quick, test) + +let test_readdir = + let test () = + let path = Filename.temp_file "" "" in + let filename = Filename.basename path in + let tmpdir = Filename.dirname path in + let* dir_contents = Sys.readdir tmpdir in + let file_present = List.exists (String.equal filename) dir_contents in + Alcotest.(check bool) "Temp file detected" true file_present ; + Lwt.return () + in + ("Read directory", `Quick, test) + +let test_assert_is_exec = + let test name path is_expected = + let* result = Sys.assert_is_executable path in + Alcotest.(check bool) name true (is_expected result) ; + Lwt.return () + in + let test () = + let path = "/missing/path" in + let is_expected = function + | Error (`missing p) -> + Alcotest.(check string) "Missing paths match" path p ; + true + | _ -> + false + in + let* () = test "File is missing" path is_expected in + + let path = Filename.temp_file "" "" in + let is_expected = function + | Error (`not_executable (p, _)) -> + Alcotest.(check string) "Non-exec paths match" path p ; + true + | _ -> + false + in + let* () = test "File is not executable" path is_expected in + + let* () = Lwt_unix.chmod path 0o700 in + let is_expected = function Ok () -> true | _ -> false in + let* () = test "File is now executable" path is_expected in + + Lwt.return () + in + ("Executable file detection", `Quick, test) + +let test_sys = + ("Sys", [test_content_rountrip; test_readdir; test_assert_is_exec]) + +let exit_or_signal_pp ppf es = + match es with + | Process.Output.Signal s -> + Fmt.pf ppf "Signal %s" (Signal.to_string s) + | Process.Output.Exit_non_zero int -> + Fmt.pf ppf "Exit %i" int + +let output_pp = + let module O = Process.Output in + let module Dump = Fmt.Dump in + Dump.record + [ + Dump.field "exit_status" + (fun t -> t.O.exit_status) + (Dump.result ~ok:Fmt.(any "()") ~error:exit_or_signal_pp) + ; Dump.field "stdout" (fun t -> t.O.stdout) Dump.string + ; Dump.field "stderr" (fun t -> t.O.stderr) Dump.string + ] + +let output_c = Alcotest.testable output_pp Stdlib.( = ) + +let test_run_status = + let module P = Process in + let test () = + let* output = P.run ~prog:"true" ~args:[] ~input:"" ~env:[] in + let expected = P.Output.{exit_status= Ok (); stdout= ""; stderr= ""} in + Alcotest.(check output_c) "Exit status is correct" expected output ; + + let* output = P.run ~prog:"false" ~args:[] ~input:"" ~env:[] in + let expected = + P.Output.{exit_status= Error (Exit_non_zero 1); stdout= ""; stderr= ""} + in + Alcotest.(check output_c) "Exit status is correct" expected output ; + + Lwt.return () + in + ("Run's exit status", `Quick, test) + +let test_run_output = + let module P = Process in + let test () = + let content = "@@@@@@" in + let* output = P.run ~prog:"cat" ~args:["-"] ~input:content ~env:[] in + let expected = P.Output.{exit_status= Ok (); stdout= content; stderr= ""} in + Alcotest.(check output_c) "Stdout is correct" expected output ; + + let* output = P.run ~prog:"cat" ~args:[content] ~input:content ~env:[] in + let stderr = + Printf.sprintf "cat: %s: No such file or directory\n" content + in + let expected = + 
P.Output.{exit_status= Error (Exit_non_zero 1); stdout= ""; stderr} + in + Alcotest.(check output_c) "Stderr is correct" expected output ; + Lwt.return () + in + ("Run output collection", `Quick, test) + +let test_proc = ("Process", [test_run_status; test_run_output]) + +let tests = [test_sys; test_proc] + +let () = Lwt_main.run @@ Alcotest_lwt.run "xapi-storage-script lib" tests diff --git a/ocaml/xapi-storage-script/test_lib.mli b/ocaml/xapi-storage-script/test_lib.mli new file mode 100644 index 00000000000..e69de29bb2d From 121c9623ba95bd542b9da22b4e53381d5cf569d2 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Mon, 30 Sep 2024 10:53:52 +0100 Subject: [PATCH 079/141] xapi-storage-script: close file descriptors after launching processes This is done by using Lwt_process.with_process_full, instead of manually managing them. Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/lib.ml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi-storage-script/lib.ml b/ocaml/xapi-storage-script/lib.ml index f9dba17040a..9c9059432bf 100644 --- a/ocaml/xapi-storage-script/lib.ml +++ b/ocaml/xapi-storage-script/lib.ml @@ -146,7 +146,7 @@ module Process = struct Error (Signal n) end - let create ~env ~prog ~args = + let with_process ~env ~prog ~args f = let args = Array.of_list (prog :: args) in let cmd = (prog, args) in @@ -164,7 +164,7 @@ module Process = struct |> Array.of_seq in - Lwt_process.open_process_full ~env cmd + Lwt_process.with_process_full ~env cmd f let close chan () = Lwt_io.close chan @@ -174,7 +174,8 @@ module Process = struct let receive chan = Lwt.finalize (fun () -> Lwt_io.read chan) (close chan) let run ~env ~prog ~args ~input = - let p = create ~env ~prog ~args in + let ( let@ ) f x = f x in + let@ p = with_process ~env ~prog ~args in let sender = send p#stdin input in let receiver_out = receive p#stdout in let receiver_err = receive p#stderr in From 09f8784461b8ee5520f4102ebd3a72b5ae07505e Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Mon, 30 Sep 2024 16:14:25 +0100 Subject: [PATCH 080/141] maintenance: ignore another formatting commit Signed-off-by: Pau Ruiz Safont --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 739b485ae74..0bf65995261 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -31,6 +31,7 @@ cbb9edb17dfd122c591beb14d1275acc39492335 d6ab15362548b8fe270bd14d5153b8d94e1b15c0 b12cf444edea15da6274975e1b2ca6a7fce2a090 364c27f5d18ab9dd31825e67a93efabecad06823 +d8b4de9076531dd13bdffa20cc10c72290a52356 # ocp-indent d018d26d6acd4707a23288b327b49e44f732725e From 9d757c2dd350cc3680b22d4a3e7e40e25b6aed74 Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 1 Oct 2024 13:02:04 +0100 Subject: [PATCH 081/141] Prefer concat_map We attempt to replace every occurrence of List.flatten with List.concat, and subsequently try to replace as many occurrences of the pattern: concat (map f xs) with concat_map f xs. 
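For illustration, the shape of the rewrite is the following (a minimal sketch; `f`
and `xs` are made up for this example and are not code from the series):

```
(* Illustrative only: [f] and [xs] are throwaway examples. *)
let f x = [x; x + 1]

let xs = [1; 2; 3]

(* Before: build a list of lists, then flatten it. *)
let before = List.concat (List.map f xs)

(* After: a single traversal with no intermediate list of lists. *)
let after = List.concat_map f xs

let () = assert (before = after)
```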
Squashed: - Use concat_map in xapi-cli-server - Use concat_map in mpathalert - Use concat_map in sdk-gen - Use concat_map in xenopsd - Use concat_map in idl/ - Use concat_map in perftest - Use concat_map in xapi - Use concat_map in networkd - Use concat_map in xcp-rrdd - Use concat_map in nbd - Use concat_map in xapi-idl - Use concat_map in libs - Use concat_map in doc Signed-off-by: Colin James --- doc/content/xapi/storage/sxm.md | 4 +- ocaml/idl/datamodel_utils.ml | 47 ++-- ocaml/idl/dm_api.ml | 9 +- ocaml/idl/dot_backend.ml | 183 ++++++------ ocaml/idl/dtd_backend.ml | 10 +- ocaml/idl/ocaml_backend/gen_api.ml | 24 +- ocaml/idl/ocaml_backend/gen_client.ml | 5 +- ocaml/idl/ocaml_backend/gen_db_actions.ml | 2 +- ocaml/idl/ocaml_backend/gen_server.ml | 2 +- ocaml/idl/ocaml_backend/gen_test.ml | 46 ++- ocaml/idl/ocaml_backend/ocaml_syntax.ml | 4 +- ocaml/libs/vhd/vhd_format/f.ml | 4 +- ocaml/libs/vhd/vhd_format/patterns.ml | 2 +- ocaml/libs/vhd/vhd_format_lwt/block.ml | 2 +- ocaml/libs/xapi-rrd/lib/rrd_updates.ml | 4 +- .../lib/xapi-stdext-std/xstringext_test.ml | 7 +- ocaml/mpathalert/mpathalert.ml | 24 +- ocaml/nbd/src/main.ml | 3 +- ocaml/networkd/bin/network_server.ml | 10 +- ocaml/networkd/bin_db/networkd_db.ml | 39 ++- ocaml/networkd/lib/network_utils.ml | 112 +++----- ocaml/perftest/cumulative_time.ml | 2 +- ocaml/perftest/graphutil.ml | 14 +- ocaml/perftest/tests.ml | 262 +++++++++--------- ocaml/sdk-gen/csharp/gen_csharp_binding.ml | 12 +- ocaml/sdk-gen/java/main.ml | 7 +- ocaml/xapi-cli-server/cli_operations.ml | 102 +++---- ocaml/xapi-idl/lib_test/idl_test_common.ml | 52 ++-- ocaml/xapi/binpack.ml | 14 +- ocaml/xapi/eventgen.ml | 60 ++-- ocaml/xapi/extauth_plugin_ADpbis.ml | 8 +- ocaml/xapi/hashtbl_xml.ml | 4 +- ocaml/xapi/message_forwarding.ml | 2 +- ocaml/xapi/monitor_master.ml | 3 +- ocaml/xapi/monitor_mem_host.ml | 72 +++-- ocaml/xapi/nm.ml | 3 +- ocaml/xapi/repository.ml | 3 +- ocaml/xapi/storage_smapiv1.ml | 3 +- ocaml/xapi/storage_smapiv1_wrapper.ml | 4 +- ocaml/xapi/valid_ref_list.ml | 2 +- ocaml/xapi/xapi_bond.ml | 48 ++-- ocaml/xapi/xapi_clustering.ml | 5 +- ocaml/xapi/xapi_guest_agent.ml | 30 +- ocaml/xapi/xapi_ha_vm_failover.ml | 24 +- ocaml/xapi/xapi_host.ml | 3 +- ocaml/xapi/xapi_host_helpers.ml | 3 +- ocaml/xapi/xapi_pbd.ml | 2 +- ocaml/xapi/xapi_pci.ml | 2 +- ocaml/xapi/xapi_pool.ml | 28 +- ocaml/xapi/xapi_pvs_server.ml | 2 +- ocaml/xapi/xapi_vbd_helpers.ml | 10 +- ocaml/xapi/xapi_vgpu_type.ml | 2 +- ocaml/xapi/xapi_vm_helpers.ml | 4 +- ocaml/xapi/xapi_vm_migrate.ml | 16 +- ocaml/xapi/xapi_xenops.ml | 9 +- ocaml/xapi/xha_interface.ml | 56 ++-- ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml | 24 +- ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml | 2 +- ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml | 51 ++-- ocaml/xcp-rrdd/bin/rrdp-xenpm/rrdp_xenpm.ml | 2 +- ocaml/xenopsd/lib/xenops_server.ml | 12 +- ocaml/xenopsd/test/test_topology.ml | 4 +- ocaml/xenopsd/xc/device.ml | 110 ++++---- ocaml/xenopsd/xc/device_common.ml | 131 ++++----- ocaml/xenopsd/xc/domain.ml | 3 +- ocaml/xenopsd/xc/xenops_server_xen.ml | 2 +- 66 files changed, 796 insertions(+), 961 deletions(-) diff --git a/doc/content/xapi/storage/sxm.md b/doc/content/xapi/storage/sxm.md index ee3b90276cc..6c44e432d22 100644 --- a/doc/content/xapi/storage/sxm.md +++ b/doc/content/xapi/storage/sxm.md @@ -230,8 +230,8 @@ Next, we determine which VDIs to copy: let vifs = Db.VM.get_VIFs ~__context ~self:vm in let snapshots = Db.VM.get_snapshots ~__context ~self:vm in let vm_and_snapshots = vm :: snapshots in - let snapshots_vbds = 
List.flatten (List.map (fun self -> Db.VM.get_VBDs ~__context ~self) snapshots) in - let snapshot_vifs = List.flatten (List.map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots) in + let snapshots_vbds = List.concat_map (fun self -> Db.VM.get_VBDs ~__context ~self) snapshots in + let snapshot_vifs = List.concat_map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots in ``` we now decide whether we're intra-pool or not, and if we're intra-pool whether we're migrating onto the same host (localhost migrate). Intra-pool is decided by trying to do a lookup of our current host uuid on the destination pool. diff --git a/ocaml/idl/datamodel_utils.ml b/ocaml/idl/datamodel_utils.ml index 6f220c6b53b..080d9059ab8 100644 --- a/ocaml/idl/datamodel_utils.ml +++ b/ocaml/idl/datamodel_utils.ml @@ -38,7 +38,7 @@ module Types = struct | Field f -> [f.ty] | Namespace (_, fields) -> - List.concat (List.map of_content fields) + List.concat_map of_content fields (** Decompose a recursive type into a list of component types (eg a Set(String) -> String :: Set(String) ) *) @@ -62,10 +62,10 @@ module Types = struct (** All types in a list of objects (automatically decomposes) *) let of_objects system = - let fields = List.concat (List.map (fun x -> x.contents) system) in - let field_types = List.concat (List.map of_content fields) in + let fields = List.concat_map (fun x -> x.contents) system in + let field_types = List.concat_map of_content fields in - let messages = List.concat (List.map (fun x -> x.messages) system) in + let messages = List.concat_map (fun x -> x.messages) system in let return_types = let aux accu msg = match msg.msg_result with None -> accu | Some (ty, _) -> ty :: accu @@ -73,9 +73,8 @@ module Types = struct List.fold_left aux [] messages in let param_types = - List.map - (fun p -> p.param_type) - (List.concat (List.map (fun x -> x.msg_params) messages)) + List.(concat_map (fun x -> map (fun p -> p.param_type) x.msg_params)) + messages in let selves = List.map (fun obj -> Ref obj.name) system in let set_self = List.map (fun t -> Set t) selves in @@ -84,7 +83,7 @@ module Types = struct Listext.List.setify (selves @ set_self @ field_types @ return_types @ param_types) in - Listext.List.setify (List.concat (List.map decompose all)) + Listext.List.setify (List.concat_map decompose all) end (** Functions for processing relationships from the model *) @@ -124,18 +123,16 @@ module Relations = struct let other_end_of api ((a, b) as one_end) = let rels = relations_of_api api in match - List.concat - (List.map - (function - | x, other_end when x = one_end -> - [other_end] - | other_end, x when x = one_end -> - [other_end] - | _ -> - [] - ) - rels - ) + List.concat_map + (function + | x, other_end when x = one_end -> + [other_end] + | other_end, x when x = one_end -> + [other_end] + | _ -> + [] + ) + rels with | [other_end] -> other_end @@ -155,11 +152,11 @@ end let fields_of_obj (x : obj) : field list = let rec of_contents = function | Namespace (_, xs) -> - List.concat (List.map of_contents xs) + List.concat_map of_contents xs | Field x -> [x] in - List.concat (List.map of_contents x.contents) + List.concat_map of_contents x.contents (* True if an object has a label (and therefore should have a get_by_name_label message *) let obj_has_get_by_name_label x = @@ -784,7 +781,7 @@ let messages_of_obj (x : obj) document_order : message list = messages @ get_all_public @ [get_all] - @ List.concat (List.map (all_new_messages_of_field x) all_fields) + @ List.concat_map (all_new_messages_of_field x) 
all_fields @ constructor_destructor @ [uuid; get_record] @ name_label @@ -793,8 +790,8 @@ let messages_of_obj (x : obj) document_order : message list = [get_record; get_record_internal; get_all; uuid] @ constructor_destructor @ name_label - @ List.concat (List.map (new_messages_of_field x 0) all_fields) - @ List.concat (List.map (new_messages_of_field x 1) all_fields) + @ List.concat_map (new_messages_of_field x 0) all_fields + @ List.concat_map (new_messages_of_field x 1) all_fields @ messages @ get_all_public diff --git a/ocaml/idl/dm_api.ml b/ocaml/idl/dm_api.ml index a35bedaa957..15d5eb4bfe8 100644 --- a/ocaml/idl/dm_api.ml +++ b/ocaml/idl/dm_api.ml @@ -79,12 +79,11 @@ let field_exists api ~objname ~fieldname = *) let filter_field (pred : field -> bool) (system : obj list) = (* NB using lists rather than options - maybe change later? *) - let concat_map f xs = List.concat (List.map f xs) in let rec content = function | Field field as x -> if pred field then [x] else [] | Namespace (name, contents) -> - [Namespace (name, concat_map content contents)] + [Namespace (name, List.concat_map content contents)] in (* remove empty /leaf/ namespaces *) let rec remove_leaf = function @@ -93,7 +92,7 @@ let filter_field (pred : field -> bool) (system : obj list) = | Namespace (_, []) -> [] (* no children so removed *) | Namespace (name, contents) -> - [Namespace (name, concat_map remove_leaf contents)] + [Namespace (name, List.concat_map remove_leaf contents)] in let rec fixpoint f x = let result = f x in @@ -103,8 +102,8 @@ let filter_field (pred : field -> bool) (system : obj list) = { x with contents= - (let contents = concat_map content x.contents in - fixpoint (concat_map remove_leaf) contents + (let contents = List.concat_map content x.contents in + fixpoint (List.concat_map remove_leaf) contents ) } in diff --git a/ocaml/idl/dot_backend.ml b/ocaml/idl/dot_backend.ml index a67879fa65d..1d1ca7811ce 100644 --- a/ocaml/idl/dot_backend.ml +++ b/ocaml/idl/dot_backend.ml @@ -34,107 +34,100 @@ let rec all_field_types = function | Field fr -> [(fr.field_name, fr.ty)] | Namespace (_, xs) -> - List.concat (List.map all_field_types xs) + List.concat_map all_field_types xs let of_objs api = let xs = objects_of_api api and relations = relations_of_api api in let names : string list = List.map (fun x -> x.name) xs in let edges : string list = - List.concat - (List.map - (fun (obj : obj) -> - (* First consider the edges defined as relational *) - let relational = - List.filter (fun ((a, _), _) -> a = obj.name) relations - in - let edges = - List.map - (fun ((a, a_field_name), (b, b_field_name)) -> - let a_field = - get_field_by_name api ~objname:a ~fieldname:a_field_name - and b_field = - get_field_by_name api ~objname:b ~fieldname:b_field_name - in - let get_arrow which obj ty = - match Relations.of_types (Ref obj) ty with - | `None -> - failwith - (sprintf - "bad relational edge between %s.%s and %s.%s; \ - object name [%s] never occurs in [%s]" - a a_field_name b b_field_name obj - (Types.to_string ty) - ) - | `One -> - [which ^ "=\"none\""] - | `Many -> - [which ^ "=\"crow\""] - in - let labels = - [(* "label=\"" ^ label ^ "\"";*) "color=\"blue\""] - @ get_arrow "arrowhead" b a_field.ty - @ get_arrow "arrowtail" a b_field.ty - in - sprintf "%s -> %s [ %s ]" a b (String.concat ", " labels) - ) - relational - in - (* list of pairs of (field name, type) *) - let name_types : (string * ty) list = - List.concat (List.map all_field_types obj.contents) - in - (* get rid of all those which are defined as 
relational *) - let name_types = - List.filter - (fun (name, _) -> - List.filter - (fun ((a, a_name), (b, b_name)) -> - (a = obj.name && a_name = name) - || (b = obj.name && b_name = name) - ) - relations - = [] - ) - name_types - in - (* decompose each ty into a list of references *) - let name_refs : (string * string * ty) list = - List.concat - (List.map - (fun (name, ty) -> - List.map (fun x -> (name, x, ty)) (all_refs ty) - ) - name_types - ) - in - let name_names : (string * string) list = - List.map - (fun (name, obj, ty) -> - let count = - match Relations.of_types (Ref obj) ty with - | `None -> - "(0)" - | `One -> - "(1)" - | `Many -> - "(*)" - in - (name ^ count, obj) - ) - name_refs - in - let edges = - List.map - (fun (field, target) -> - sprintf "%s -> %s [ label=\"%s\" ]" obj.name target field - ) - name_names - @ edges - in - edges - ) - xs + List.concat_map + (fun (obj : obj) -> + (* First consider the edges defined as relational *) + let relational = + List.filter (fun ((a, _), _) -> a = obj.name) relations + in + let edges = + List.map + (fun ((a, a_field_name), (b, b_field_name)) -> + let a_field = + get_field_by_name api ~objname:a ~fieldname:a_field_name + and b_field = + get_field_by_name api ~objname:b ~fieldname:b_field_name + in + let get_arrow which obj ty = + match Relations.of_types (Ref obj) ty with + | `None -> + failwith + (sprintf + "bad relational edge between %s.%s and %s.%s; object \ + name [%s] never occurs in [%s]" + a a_field_name b b_field_name obj (Types.to_string ty) + ) + | `One -> + [which ^ "=\"none\""] + | `Many -> + [which ^ "=\"crow\""] + in + let labels = + [(* "label=\"" ^ label ^ "\"";*) "color=\"blue\""] + @ get_arrow "arrowhead" b a_field.ty + @ get_arrow "arrowtail" a b_field.ty + in + sprintf "%s -> %s [ %s ]" a b (String.concat ", " labels) + ) + relational + in + (* list of pairs of (field name, type) *) + let name_types : (string * ty) list = + List.concat_map all_field_types obj.contents + in + (* get rid of all those which are defined as relational *) + let name_types = + List.filter + (fun (name, _) -> + List.filter + (fun ((a, a_name), (b, b_name)) -> + (a = obj.name && a_name = name) + || (b = obj.name && b_name = name) + ) + relations + = [] + ) + name_types + in + (* decompose each ty into a list of references *) + let name_refs : (string * string * ty) list = + List.concat_map + (fun (name, ty) -> List.map (fun x -> (name, x, ty)) (all_refs ty)) + name_types + in + let name_names : (string * string) list = + List.map + (fun (name, obj, ty) -> + let count = + match Relations.of_types (Ref obj) ty with + | `None -> + "(0)" + | `One -> + "(1)" + | `Many -> + "(*)" + in + (name ^ count, obj) + ) + name_refs + in + let edges = + List.map + (fun (field, target) -> + sprintf "%s -> %s [ label=\"%s\" ]" obj.name target field + ) + name_names + @ edges + in + edges ) + xs in [ "digraph g{" diff --git a/ocaml/idl/dtd_backend.ml b/ocaml/idl/dtd_backend.ml index d820e2623ef..9fa7f6fd58d 100644 --- a/ocaml/idl/dtd_backend.ml +++ b/ocaml/idl/dtd_backend.ml @@ -99,11 +99,9 @@ let rec strings_of_dtd_element known_els = function Hashtbl.remove known_els name ; sprintf "%s%s>" prefix body :: (strings_of_attributes name attributes - @ List.concat - (List.map - (strings_of_dtd_element known_els) - (List.filter is_element els) - ) + @ List.concat_map + (strings_of_dtd_element known_els) + (List.filter is_element els) ) ) else [] @@ -166,4 +164,4 @@ let of_objs api = let xs = objects_of_api api in let known_els = Hashtbl.create 10 in let elements = 
List.map (dtd_element_of_obj known_els) xs in - List.concat (List.map (strings_of_dtd_element known_els) elements) + List.concat_map (strings_of_dtd_element known_els) elements diff --git a/ocaml/idl/ocaml_backend/gen_api.ml b/ocaml/idl/ocaml_backend/gen_api.ml index 1caf9eee138..7bedb49eca8 100644 --- a/ocaml/idl/ocaml_backend/gen_api.ml +++ b/ocaml/idl/ocaml_backend/gen_api.ml @@ -285,20 +285,18 @@ let gen_client highapi = ) let add_set_enums types = - List.concat - (List.map - (fun ty -> - match ty with - | DT.Enum _ -> - if List.exists (fun ty2 -> ty2 = DT.Set ty) types then - [ty] - else - [DT.Set ty; ty] - | _ -> - [ty] - ) - types + List.concat_map + (fun ty -> + match ty with + | DT.Enum _ -> + if List.exists (fun ty2 -> ty2 = DT.Set ty) types then + [ty] + else + [DT.Set ty; ty] + | _ -> + [ty] ) + types let all_types_of highapi = DU.Types.of_objects (Dm_api.objects_of_api highapi) diff --git a/ocaml/idl/ocaml_backend/gen_client.ml b/ocaml/idl/ocaml_backend/gen_client.ml index d456dd9d5d8..0082f64a1d0 100644 --- a/ocaml/idl/ocaml_backend/gen_client.ml +++ b/ocaml/idl/ocaml_backend/gen_client.ml @@ -221,8 +221,9 @@ let gen_module api : O.Module.t = let fields_of = List.map (fun x -> O.Module.Let x) in let operations = List.map (fun x -> operation ~sync obj x) obj.messages in let helpers = - List.concat - (List.map (fun x -> helper_record_constructor ~sync obj x) obj.messages) + List.concat_map + (fun x -> helper_record_constructor ~sync obj x) + obj.messages in let fields = fields_of (operations @ helpers) in (* diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index e0cc5cc8454..91c1d9a6ad2 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -586,7 +586,7 @@ let db_action api : O.Module.t = () in let all = Dm_api.objects_of_api api in - let modules = List.concat (List.map (fun x -> [obj x; obj_init x]) all) in + let modules = List.concat_map (fun x -> [obj x; obj_init x]) all in O.Module.make ~name:_db_action ~preamble: [ diff --git a/ocaml/idl/ocaml_backend/gen_server.ml b/ocaml/idl/ocaml_backend/gen_server.ml index e091e07b4d2..31e2bbe16f2 100644 --- a/ocaml/idl/ocaml_backend/gen_server.ml +++ b/ocaml/idl/ocaml_backend/gen_server.ml @@ -496,7 +496,7 @@ let gen_module api : O.Module.t = ; "Server_helpers.dispatch_exn_wrapper (fun () -> (match \ __call with " ] - @ List.flatten (List.map obj all_objs) + @ List.concat_map obj all_objs @ [ "| \"system.listMethods\" -> " ; " success (rpc_of_string_set [" diff --git a/ocaml/idl/ocaml_backend/gen_test.ml b/ocaml/idl/ocaml_backend/gen_test.ml index abf251014f0..70dc19a0fa6 100644 --- a/ocaml/idl/ocaml_backend/gen_test.ml +++ b/ocaml/idl/ocaml_backend/gen_test.ml @@ -75,30 +75,28 @@ let gen_test highapi = [ ["open API"] ; ["let _ ="] - ; List.concat - (List.map - (fun ty -> - [ - sprintf "let oc = open_out \"rpc-light_%s.xml\" in" - (OU.alias_of_ty ty) - ; sprintf "let x = %s in" (gen_test_type highapi ty) - ; sprintf - "Printf.fprintf oc \"%%s\" (Xmlrpc.to_string \ - (API.rpc_of_%s x));" - (OU.alias_of_ty ty) - ; "close_out oc;" - ; sprintf "let oc = open_out \"xml-light2_%s.xml\" in" - (OU.alias_of_ty ty) - ; sprintf - "Printf.fprintf oc \"%%s\" (Xml.to_string \ - (API.Legacy.To.%s x));" - (OU.alias_of_ty ty) - ; "close_out oc;" - (* sprintf "let s = Xml.to_string (API.Legacy.To.%s x) in" (OU.alias_of_ty ty);*) - (* sprintf "let y =" *) - ] - ) - all_types + ; List.concat_map + (fun ty -> + [ + sprintf "let oc = open_out 
\"rpc-light_%s.xml\" in" + (OU.alias_of_ty ty) + ; sprintf "let x = %s in" (gen_test_type highapi ty) + ; sprintf + "Printf.fprintf oc \"%%s\" (Xmlrpc.to_string (API.rpc_of_%s \ + x));" + (OU.alias_of_ty ty) + ; "close_out oc;" + ; sprintf "let oc = open_out \"xml-light2_%s.xml\" in" + (OU.alias_of_ty ty) + ; sprintf + "Printf.fprintf oc \"%%s\" (Xml.to_string (API.Legacy.To.%s \ + x));" + (OU.alias_of_ty ty) + ; "close_out oc;" + (* sprintf "let s = Xml.to_string (API.Legacy.To.%s x) in" (OU.alias_of_ty ty);*) + (* sprintf "let y =" *) + ] ) + all_types ] ) diff --git a/ocaml/idl/ocaml_backend/ocaml_syntax.ml b/ocaml/idl/ocaml_backend/ocaml_syntax.ml index 634b7477830..e52cce36523 100644 --- a/ocaml/idl/ocaml_backend/ocaml_syntax.ml +++ b/ocaml/idl/ocaml_backend/ocaml_syntax.ml @@ -153,7 +153,7 @@ module Module = struct [ List.map (fun x -> Line x) x.preamble ; (if x.letrec then [Line "let rec __unused () = ()"] else []) - ; List.concat (List.map e x.elements) + ; List.concat_map e x.elements ; List.map (fun x -> Line x) x.postamble ] in @@ -182,7 +182,7 @@ module Signature = struct else Line ("module " ^ x.name ^ " : sig") ) - ; Indent (List.concat (List.map e x.elements)) + ; Indent (List.concat_map e x.elements) ; Line "end" ] diff --git a/ocaml/libs/vhd/vhd_format/f.ml b/ocaml/libs/vhd/vhd_format/f.ml index e3bfc97a1fe..66b3e2f788e 100644 --- a/ocaml/libs/vhd/vhd_format/f.ml +++ b/ocaml/libs/vhd/vhd_format/f.ml @@ -285,7 +285,7 @@ module UTF16 = struct String.concat "" (List.map (fun c -> Printf.sprintf "%c" c) - (List.flatten (List.map utf8_chars_of_int (Array.to_list s))) + (List.concat_map utf8_chars_of_int (Array.to_list s)) ) let to_utf8 x = try Rresult.R.ok (to_utf8_exn x) with e -> Rresult.R.error e @@ -1543,7 +1543,7 @@ module Vhd = struct ) locators in - List.flatten locations @ blocks + List.concat locations @ blocks else blocks in diff --git a/ocaml/libs/vhd/vhd_format/patterns.ml b/ocaml/libs/vhd/vhd_format/patterns.ml index 1f575b00d19..942786854e3 100644 --- a/ocaml/libs/vhd/vhd_format/patterns.ml +++ b/ocaml/libs/vhd/vhd_format/patterns.ml @@ -90,7 +90,7 @@ let string_of_operation = function (string_of_choice p.sector) let descr_of_program p = - let lines = List.concat (List.map descr_of_operation p) in + let lines = List.concat_map descr_of_operation p in List.rev (fst (List.fold_left diff --git a/ocaml/libs/vhd/vhd_format_lwt/block.ml b/ocaml/libs/vhd/vhd_format_lwt/block.ml index b4574e14e28..a9dead185db 100644 --- a/ocaml/libs/vhd/vhd_format_lwt/block.ml +++ b/ocaml/libs/vhd/vhd_format_lwt/block.ml @@ -61,7 +61,7 @@ let to_sectors bufs = (Cstruct.sub remaining 0 available :: acc) (Cstruct.shift remaining available) in - List.concat (List.map (loop []) bufs) + List.concat_map (loop []) bufs let forall_sectors f offset bufs = let rec one offset = function diff --git a/ocaml/libs/xapi-rrd/lib/rrd_updates.ml b/ocaml/libs/xapi-rrd/lib/rrd_updates.ml index d9de5b045b5..af8b0f691d6 100644 --- a/ocaml/libs/xapi-rrd/lib/rrd_updates.ml +++ b/ocaml/libs/xapi-rrd/lib/rrd_updates.ml @@ -73,7 +73,7 @@ let create rra_timestep rras first_rra last_cdp_time first_cdp_time start let extract_row rra = List.map (fun ring -> Fring.peek ring i) (Array.to_list rra.rra_data) in - let values = List.concat (List.map extract_row rras) in + let values = List.concat_map extract_row rras in do_data (i + 1) ({time; row_data= Array.of_list values} :: accum) in @@ -283,7 +283,7 @@ let create_multi prefixandrrds start interval cfopt = ) in - let rras = List.flatten rras in + let rras = List.concat 
rras in (* The following timestep is that of the archive *) let rra_timestep = Int64.mul timestep (Int64.of_int first_rra.rra_pdp_cnt) in diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-std/xstringext_test.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-std/xstringext_test.ml index 7d2766cbaf4..b0816e69ebb 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-std/xstringext_test.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-std/xstringext_test.ml @@ -39,7 +39,7 @@ let test_rev_map = in let tests = (* Generate the product of the two lists to generate the tests *) - List.concat (List.map (fun func -> List.map (test func) spec_rev) spec_func) + List.concat_map (fun func -> List.map (test func) spec_rev) spec_func in ("rev_map", tests) @@ -83,8 +83,9 @@ let test_split = ] in let tests_limit = - List.map (fun (limit, spec) -> List.map (test ~limit) spec) specs_limit - |> List.concat + List.concat_map + (fun (limit, spec) -> List.map (test ~limit) spec) + specs_limit in ("split", List.concat [tests_no_limit; tests_limit]) diff --git a/ocaml/mpathalert/mpathalert.ml b/ocaml/mpathalert/mpathalert.ml index bea5ae2ee0a..3a5d2556bd1 100644 --- a/ocaml/mpathalert/mpathalert.ml +++ b/ocaml/mpathalert/mpathalert.ml @@ -257,25 +257,21 @@ let state_of_the_world rpc session_id = debug "Generating the current state of the world" ; let pbds = Client.PBD.get_all_records ~rpc ~session_id in let pbd_alerts = - List.flatten - (List.map - (fun (pbd_ref, pbd_rec) -> - create_pbd_alerts rpc session_id [] - (pbd_ref, pbd_rec, Unix.gettimeofday ()) - ) - pbds + List.concat_map + (fun (pbd_ref, pbd_rec) -> + create_pbd_alerts rpc session_id [] + (pbd_ref, pbd_rec, Unix.gettimeofday ()) ) + pbds in let hosts = Client.Host.get_all_records ~rpc ~session_id in let host_alerts = - List.flatten - (List.map - (fun (host_ref, host_rec) -> - create_host_alerts rpc session_id [] - (host_ref, host_rec, Unix.gettimeofday ()) - ) - hosts + List.concat_map + (fun (host_ref, host_rec) -> + create_host_alerts rpc session_id [] + (host_ref, host_rec, Unix.gettimeofday ()) ) + hosts in let alerts = List.filter diff --git a/ocaml/nbd/src/main.ml b/ocaml/nbd/src/main.ml index 5b5be77f03a..bfdcee6a43f 100644 --- a/ocaml/nbd/src/main.ml +++ b/ocaml/nbd/src/main.ml @@ -93,8 +93,7 @@ let xapi_says_use_tls () = let ask_xapi rpc session_id = Xen_api.Network.get_all_records ~rpc ~session_id >>= fun all_nets -> let all_porpoises = - List.map (fun (_str, net) -> net.API.network_purpose) all_nets - |> List.flatten + List.concat_map (fun (_str, net) -> net.API.network_purpose) all_nets in let tls = List.mem `nbd all_porpoises in let no_tls = List.mem `insecure_nbd all_porpoises in diff --git a/ocaml/networkd/bin/network_server.ml b/ocaml/networkd/bin/network_server.ml index d31d256ef92..d0b21a125d6 100644 --- a/ocaml/networkd/bin/network_server.ml +++ b/ocaml/networkd/bin/network_server.ml @@ -1136,14 +1136,13 @@ module Bridge = struct (fun () -> if from_cache then let ports = - List.concat - (List.map (fun (_, {ports; _}) -> ports) !config.bridge_config) + List.concat_map (fun (_, {ports; _}) -> ports) !config.bridge_config in List.map (fun (port, {interfaces; _}) -> (port, interfaces)) ports else match !backend_kind with | Openvswitch -> - List.concat (List.map Ovs.bridge_to_ports (Ovs.list_bridges ())) + List.concat_map Ovs.bridge_to_ports (Ovs.list_bridges ()) | Bridge -> raise (Network_error Not_implemented) ) @@ -1154,8 +1153,7 @@ module Bridge = struct (fun () -> if from_cache then let ports = - List.concat - (List.map (fun (_, {ports; _}) 
-> ports) !config.bridge_config) + List.concat_map (fun (_, {ports; _}) -> ports) !config.bridge_config in let names = List.map (fun (port, {interfaces; _}) -> (port, interfaces)) ports @@ -1164,7 +1162,7 @@ module Bridge = struct else match !backend_kind with | Openvswitch -> - List.concat (List.map Ovs.bridge_to_ports (Ovs.list_bridges ())) + List.concat_map Ovs.bridge_to_ports (Ovs.list_bridges ()) | Bridge -> raise (Network_error Not_implemented) ) diff --git a/ocaml/networkd/bin_db/networkd_db.ml b/ocaml/networkd/bin_db/networkd_db.ml index 22c91e852c1..f62021828fa 100644 --- a/ocaml/networkd/bin_db/networkd_db.ml +++ b/ocaml/networkd/bin_db/networkd_db.ml @@ -35,8 +35,7 @@ let _ = if List.mem_assoc !bridge config.bridge_config then ( let bridge_config = List.assoc !bridge config.bridge_config in let ifaces = - List.flatten - (List.map (fun (_, port) -> port.interfaces) bridge_config.ports) + List.concat_map (fun (_, port) -> port.interfaces) bridge_config.ports in Printf.printf "interfaces=%s\n" (String.concat "," ifaces) ; match bridge_config.vlan with @@ -58,16 +57,14 @@ let _ = | Static4 conf -> let mode = [("mode", "static")] in let addrs = - List.flatten - (List.map - (fun (ip, plen) -> - [ - ("ipaddr", Unix.string_of_inet_addr ip) - ; ("netmask", prefixlen_to_netmask plen) - ] - ) - conf + List.concat_map + (fun (ip, plen) -> + [ + ("ipaddr", Unix.string_of_inet_addr ip) + ; ("netmask", prefixlen_to_netmask plen) + ] ) + conf in let gateway = match interface_config.ipv4_gateway with @@ -105,19 +102,15 @@ let _ = | Static6 conf -> let mode = [("modev6", "static")] in let addrs = - List.flatten - (List.map - (fun (ip, plen) -> - [ - ( "ipv6addr" - , Unix.string_of_inet_addr ip - ^ "/" - ^ string_of_int plen - ) - ] - ) - conf + List.concat_map + (fun (ip, plen) -> + [ + ( "ipv6addr" + , Unix.string_of_inet_addr ip ^ "/" ^ string_of_int plen + ) + ] ) + conf in let gateway = match interface_config.ipv6_gateway with diff --git a/ocaml/networkd/lib/network_utils.ml b/ocaml/networkd/lib/network_utils.ml index fe371e694de..39417cf1177 100644 --- a/ocaml/networkd/lib/network_utils.ml +++ b/ocaml/networkd/lib/network_utils.ml @@ -1566,15 +1566,11 @@ module Ovs = struct in List.filter_map parse lines in - List.flatten - (List.map - (fun vif -> - create_port_arg - ?ty:(List.assoc_opt vif ifaces_with_type) - vif name - ) - existing_vifs + List.concat_map + (fun vif -> + create_port_arg ?ty:(List.assoc_opt vif ifaces_with_type) vif name ) + existing_vifs in let del_old_arg = let real_bridge_exists () = @@ -1746,32 +1742,26 @@ module Ovs = struct in (* Don't add new properties here, these use the legacy converter *) let extra_args_legacy = - List.flatten - (List.map get_prop_legacy - [ - ("updelay", "bond_updelay") - ; ("downdelay", "bond_downdelay") - ; ("miimon", "other-config:bond-miimon-interval") - ; ("use_carrier", "other-config:bond-detect-mode") - ; ("rebalance-interval", "other-config:bond-rebalance-interval") - ] - ) + List.concat_map get_prop_legacy + [ + ("updelay", "bond_updelay") + ; ("downdelay", "bond_downdelay") + ; ("miimon", "other-config:bond-miimon-interval") + ; ("use_carrier", "other-config:bond-detect-mode") + ; ("rebalance-interval", "other-config:bond-rebalance-interval") + ] and extra_args = - List.flatten - (List.map get_prop - [ - ("lacp-time", "other-config:lacp-time") - ; ("lacp-fallback-ab", "other-config:lacp-fallback-ab") - ] - ) + List.concat_map get_prop + [ + ("lacp-time", "other-config:lacp-time") + ; ("lacp-fallback-ab", "other-config:lacp-fallback-ab") 
+ ] and per_iface_args = - List.flatten - (List.map get_prop - [ - ("lacp-aggregation-key", "other-config:lacp-aggregation-key") - ; ("lacp-actor-key", "other-config:lacp-actor-key") - ] - ) + List.concat_map get_prop + [ + ("lacp-aggregation-key", "other-config:lacp-aggregation-key") + ; ("lacp-actor-key", "other-config:lacp-actor-key") + ] and other_args = List.filter_map (fun (k, v) -> @@ -1801,11 +1791,9 @@ module Ovs = struct if per_iface_args = [] then [] else - List.flatten - (List.map - (fun iface -> ["--"; "set"; "interface"; iface] @ per_iface_args) - interfaces - ) + List.concat_map + (fun iface -> ["--"; "set"; "interface"; iface] @ per_iface_args) + interfaces in vsctl (["--"; "--may-exist"; "add-bond"; bridge; name] @@ -1841,26 +1829,24 @@ module Ovs = struct mac port ] | ports -> - List.flatten - (List.map - (fun port -> - [ - Printf.sprintf - "idle_timeout=0,priority=0,in_port=local,arp,dl_src=%s,actions=NORMAL" - mac - ; Printf.sprintf - "idle_timeout=0,priority=0,in_port=local,dl_src=%s,actions=NORMAL" - mac - ; Printf.sprintf - "idle_timeout=0,priority=0,in_port=%s,arp,nw_proto=1,actions=local" - port - ; Printf.sprintf - "idle_timeout=0,priority=0,in_port=%s,dl_dst=%s,actions=local" - port mac - ] - ) - ports + List.concat_map + (fun port -> + [ + Printf.sprintf + "idle_timeout=0,priority=0,in_port=local,arp,dl_src=%s,actions=NORMAL" + mac + ; Printf.sprintf + "idle_timeout=0,priority=0,in_port=local,dl_src=%s,actions=NORMAL" + mac + ; Printf.sprintf + "idle_timeout=0,priority=0,in_port=%s,arp,nw_proto=1,actions=local" + port + ; Printf.sprintf + "idle_timeout=0,priority=0,in_port=%s,dl_dst=%s,actions=local" + port mac + ] ) + ports in List.iter (fun flow -> ignore (ofctl ["add-flow"; bridge; flow])) flows @@ -1903,22 +1889,12 @@ module Ethtool = struct let set_options name options = if options <> [] then ignore - (call - ("-s" - :: name - :: List.concat (List.map (fun (k, v) -> [k; v]) options) - ) - ) + (call ("-s" :: name :: List.concat_map (fun (k, v) -> [k; v]) options)) let set_offload name options = if options <> [] then ignore - (call - ("-K" - :: name - :: List.concat (List.map (fun (k, v) -> [k; v]) options) - ) - ) + (call ("-K" :: name :: List.concat_map (fun (k, v) -> [k; v]) options)) end module Dracut = struct diff --git a/ocaml/perftest/cumulative_time.ml b/ocaml/perftest/cumulative_time.ml index 9538056094b..5c7ff17d4e9 100644 --- a/ocaml/perftest/cumulative_time.ml +++ b/ocaml/perftest/cumulative_time.ml @@ -80,7 +80,7 @@ let _ = all ; (* Plot a line for (a) elapsed time and (b) this particular duration *) let ls = - List.flatten + List.concat (List.mapi (fun i ((info, _floats), output) -> let graph_one_label = diff --git a/ocaml/perftest/graphutil.ml b/ocaml/perftest/graphutil.ml index 2713dff321f..e2b0880ed46 100644 --- a/ocaml/perftest/graphutil.ml +++ b/ocaml/perftest/graphutil.ml @@ -30,13 +30,11 @@ let merge_infos (infos : info list) = in let floats ((file, result, subtest) as i) = ( i - , List.flatten - (List.map - (fun ((f, r, s), fl) -> - if file = f && result = r && subtest = s then fl else [] - ) - infos + , List.concat_map + (fun ((f, r, s), fl) -> + if file = f && result = r && subtest = s then fl else [] ) + infos ) in let merge_infos = List.map floats names in @@ -83,9 +81,9 @@ let get_info ?(separate = false) files : info list = | None -> [((f, "", ""), floats_from_file f)] | Some results -> - List.flatten (List.map (info_from_raw_result ~separate f) results) + List.concat_map (info_from_raw_result ~separate f) results in - merge_infos 
(List.flatten (List.map aux files)) + merge_infos (List.concat_map aux files) let short_info_to_string ((file, result, subtest) : short_info) = Printf.sprintf "%s.%s.%s" result subtest file diff --git a/ocaml/perftest/tests.ml b/ocaml/perftest/tests.ml index d0463e9f60a..731d0fa1200 100644 --- a/ocaml/perftest/tests.ml +++ b/ocaml/perftest/tests.ml @@ -43,7 +43,7 @@ let subtest_string key tag = let startall rpc session_id test = let vms = Client.VM.get_all_records ~rpc ~session_id in let tags = List.map (fun (_, vmr) -> vmr.API.vM_tags) vms in - let tags = Listext.List.setify (List.flatten tags) in + let tags = Listext.List.setify (List.concat tags) in List.map (fun tag -> debug "Starting VMs with tag: %s" tag ; @@ -167,25 +167,24 @@ let parallel_with_vms async_op opname n vms rpc session_id test subtest_name = in let events = List.map Event_helper.record_of_event events in let finished_tasks = - List.concat - (List.map - (function - | Event_helper.Task (t, Some t_rec) -> - if - t_rec.API.task_status <> `pending - || t_rec.API.task_current_operations <> [] - then - [t] - else - [] - | Event_helper.Task (t, None) -> - [t] - | _ -> - [] - ) - events - ) + List.concat_map + (function + | Event_helper.Task (t, Some t_rec) -> + if + t_rec.API.task_status <> `pending + || t_rec.API.task_current_operations <> [] + then + [t] + else + [] + | Event_helper.Task (t, None) -> + [t] + | _ -> + [] + ) + events in + finished := process_finished_tasks finished_tasks done with @@ -239,7 +238,7 @@ let parallel_with_vms async_op opname n vms rpc session_id test subtest_name = let parallel async_op opname n rpc session_id test = let vms = Client.VM.get_all_records ~rpc ~session_id in let tags = List.map (fun (_, vmr) -> vmr.API.vM_tags) vms in - let tags = Listext.List.setify (List.flatten tags) in + let tags = Listext.List.setify (List.concat tags) in Printf.printf "Tags are [%s]\n%!" (String.concat "; " tags) ; List.map (fun tag -> @@ -260,7 +259,7 @@ let parallel_stopall = parallel Client.Async.VM.hard_shutdown "stop" let stopall rpc session_id test = let vms = Client.VM.get_all_records ~rpc ~session_id in let tags = List.map (fun (_, vmr) -> vmr.API.vM_tags) vms in - let tags = Listext.List.setify (List.flatten tags) in + let tags = Listext.List.setify (List.concat tags) in List.map (fun tag -> debug "Starting VMs with tag: %s" tag ; @@ -304,121 +303,118 @@ let clone num_clones rpc session_id test = Printf.printf "Doing clone test\n%!" ; let vms = Client.VM.get_all_records ~rpc ~session_id in let tags = List.map (fun (_, vmr) -> vmr.API.vM_tags) vms in - let tags = Listext.List.setify (List.flatten tags) in + let tags = Listext.List.setify (List.concat tags) in Printf.printf "Tags are [%s]\n%!" (String.concat "; " tags) ; - List.flatten - (List.map - (fun tag -> - let vms = - List.filter (fun (_, vmr) -> List.mem tag vmr.API.vM_tags) vms - in - Printf.printf "We've got %d VMs\n%!" (List.length vms) ; - (* Start a thread to clone each one n times *) - let body (vm, vmr, res, clone_refs) = - let name_label = vmr.API.vM_name_label in - Printf.printf "Performing %d clones of '%s' within thread...\n%!" - num_clones name_label ; - for j = 0 to num_clones - 1 do - let result = - time (fun () -> - let clone = - Client.VM.clone ~rpc ~session_id ~vm ~new_name:"clone" - in - clone_refs := clone :: !clone_refs - ) - in - Printf.printf "clone %d of '%s' finished: %f\n%!" 
j name_label - result ; - res := result :: !res - done - in - let threads_and_results = - List.map - (fun (vm, vmr) -> - let res : float list ref = ref [] in - let clones : API.ref_VM list ref = ref [] in - let t = Thread.create body (vm, vmr, res, clones) in - (t, (res, clones)) - ) - vms - in - let threads, times_and_clones = List.split threads_and_results in - let times, clones = List.split times_and_clones in - Printf.printf "Waiting for threads to finish...\n%!" ; - List.iter (fun t -> Thread.join t) threads ; - Printf.printf "Threads have finished\n%!" ; - (* times is a list of (list of floats, each being the time to clone a VM), one per SR *) - let times = List.map (fun x -> !x) times in - Printf.printf "Times are: [%s]\n%!" - (String.concat ", " - (List.map - (fun x -> - Printf.sprintf "[%s]" - (String.concat ", " - (List.map (fun x -> Printf.sprintf "%f" x) x) - ) - ) - times + List.concat_map + (fun tag -> + let vms = + List.filter (fun (_, vmr) -> List.mem tag vmr.API.vM_tags) vms + in + Printf.printf "We've got %d VMs\n%!" (List.length vms) ; + (* Start a thread to clone each one n times *) + let body (vm, vmr, res, clone_refs) = + let name_label = vmr.API.vM_name_label in + Printf.printf "Performing %d clones of '%s' within thread...\n%!" + num_clones name_label ; + for j = 0 to num_clones - 1 do + let result = + time (fun () -> + let clone = + Client.VM.clone ~rpc ~session_id ~vm ~new_name:"clone" + in + clone_refs := clone :: !clone_refs + ) + in + Printf.printf "clone %d of '%s' finished: %f\n%!" j name_label result ; + res := result :: !res + done + in + let threads_and_results = + List.map + (fun (vm, vmr) -> + let res : float list ref = ref [] in + let clones : API.ref_VM list ref = ref [] in + let t = Thread.create body (vm, vmr, res, clones) in + (t, (res, clones)) + ) + vms + in + let threads, times_and_clones = List.split threads_and_results in + let times, clones = List.split times_and_clones in + Printf.printf "Waiting for threads to finish...\n%!" ; + List.iter (fun t -> Thread.join t) threads ; + Printf.printf "Threads have finished\n%!" ; + (* times is a list of (list of floats, each being the time to clone a VM), one per SR *) + let times = List.map (fun x -> !x) times in + Printf.printf "Times are: [%s]\n%!" + (String.concat ", " + (List.map + (fun x -> + Printf.sprintf "[%s]" + (String.concat ", " + (List.map (fun x -> Printf.sprintf "%f" x) x) + ) ) - ) ; - let clones = List.map (fun x -> !x) clones in - (* Output the results for cloning each gold VM as a separate record *) - let results = - List.map - (fun x -> - { - resultname= test.testname - ; subtest= subtest_string test.key tag - ; xenrtresult= List.fold_left ( +. ) 0.0 (List.flatten times) - ; rawresult= CloneTest x - } - ) - times - in - (* Best-effort clean-up *) - ignore_exn (fun () -> - Printf.printf "Cleaning up...\n%!" ; - (* Create a thread to clean up each set of clones *) - let threads = - List.mapi - (fun i clones -> - Thread.create - (fun clones -> - List.iteri - (fun j clone -> - Printf.printf "Thread %d destroying VM %d...\n%!" i j ; - let vbds = - Client.VM.get_VBDs ~rpc ~session_id ~self:clone - in - let vdis = - List.map - (fun vbd -> - Client.VBD.get_VDI ~rpc ~session_id ~self:vbd - ) - vbds - in - List.iter - (fun vdi -> - Client.VDI.destroy ~rpc ~session_id ~self:vdi - ) - vdis ; - Client.VM.destroy ~rpc ~session_id ~self:clone - ) - clones - ) - clones - ) - clones - in - Printf.printf "Waiting for clean-up threads to finish...\n%!" 
; - List.iter (fun t -> Thread.join t) threads ; - Printf.printf "Clean-up threads have finished\n%!" - ) ; - (* Finally, return the results *) - results - ) - tags + times + ) + ) ; + let clones = List.map (fun x -> !x) clones in + (* Output the results for cloning each gold VM as a separate record *) + let results = + List.map + (fun x -> + { + resultname= test.testname + ; subtest= subtest_string test.key tag + ; xenrtresult= List.fold_left ( +. ) 0.0 (List.concat times) + ; rawresult= CloneTest x + } + ) + times + in + (* Best-effort clean-up *) + ignore_exn (fun () -> + Printf.printf "Cleaning up...\n%!" ; + (* Create a thread to clean up each set of clones *) + let threads = + List.mapi + (fun i clones -> + Thread.create + (fun clones -> + List.iteri + (fun j clone -> + Printf.printf "Thread %d destroying VM %d...\n%!" i j ; + let vbds = + Client.VM.get_VBDs ~rpc ~session_id ~self:clone + in + let vdis = + List.map + (fun vbd -> + Client.VBD.get_VDI ~rpc ~session_id ~self:vbd + ) + vbds + in + List.iter + (fun vdi -> + Client.VDI.destroy ~rpc ~session_id ~self:vdi + ) + vdis ; + Client.VM.destroy ~rpc ~session_id ~self:clone + ) + clones + ) + clones + ) + clones + in + Printf.printf "Waiting for clean-up threads to finish...\n%!" ; + List.iter (fun t -> Thread.join t) threads ; + Printf.printf "Clean-up threads have finished\n%!" + ) ; + (* Finally, return the results *) + results ) + tags let recordssize rpc session_id test = let doxmlrpctest (subtestname, testfn) = diff --git a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml index aa65b99b4c3..bbf3360c897 100644 --- a/ocaml/sdk-gen/csharp/gen_csharp_binding.ml +++ b/ocaml/sdk-gen/csharp/gen_csharp_binding.ml @@ -382,7 +382,7 @@ and gen_class out_chan cls = gen_overloads generator message in let all_methods = - messages |> List.map (gen_exposed_method_overloads cls) |> List.concat + messages |> List.concat_map (gen_exposed_method_overloads cls) in List.iter (print "%s") all_methods ; List.iter (gen_exposed_field out_chan cls) contents ; @@ -581,7 +581,7 @@ and exposed_call_params message classname params = (* 'messages' are methods, 'contents' are fields *) and gen_save_changes out_chan exposed_class_name messages contents = - let fields = List.flatten (List.map flatten_content contents) in + let fields = List.concat_map flatten_content contents in let fields2 = List.filter (fun fr -> fr.qualifier == RW && not (List.mem "public" fr.full_name)) @@ -620,7 +620,7 @@ and flatten_content content = | Field fr -> [fr] | Namespace (_, c) -> - List.flatten (List.map flatten_content c) + List.concat_map flatten_content c and gen_save_changes_to_field out_chan exposed_class_name fr = let print format = fprintf out_chan format in @@ -675,9 +675,7 @@ and gen_exposed_field out_chan cls content = List.iter (gen_exposed_field out_chan cls) c and gen_proxy protocol = - let all_methods = - classes |> List.map gen_proxy_class_methods |> List.concat - in + let all_methods = classes |> List.concat_map gen_proxy_class_methods in match protocol with | CommonFunctions.JsonRpc -> let json_method x = `O [("client_method", `String x)] in @@ -690,7 +688,7 @@ and gen_proxy_class_methods {name; messages; _} = let generator params = gen_proxy_method name message params in gen_overloads generator message in - messages |> List.map (gen_message_overloads name) |> List.concat + messages |> List.concat_map (gen_message_overloads name) and gen_proxy_method classname message params = let proxy_msg_name = proxy_msg_name classname 
message in diff --git a/ocaml/sdk-gen/java/main.ml b/ocaml/sdk-gen/java/main.ml index b025e434964..3b7db08745b 100644 --- a/ocaml/sdk-gen/java/main.ml +++ b/ocaml/sdk-gen/java/main.ml @@ -737,9 +737,9 @@ let get_class_fields_json cls = ] ] | Namespace (name, contents) -> - List.flatten (List.map (fun c -> content_fields c name) contents) + List.concat_map (fun c -> content_fields c name) contents in - List.flatten (List.map (fun c -> content_fields c "") cls.contents) + List.concat_map (fun c -> content_fields c "") cls.contents (** [get_all_message_variants messages acc] takes a list of messages [messages] and an accumulator [acc], and recursively constructs a list of tuples representing both asynchronous and synchronous variants of each message, @@ -768,12 +768,11 @@ let rec get_all_message_variants messages acc = (fun (message, is_async) -> (message, is_async, [])) messages | _ -> - List.map + List.concat_map (fun (message, is_async) -> List.map (fun param -> (message, is_async, param)) params ) messages - |> List.flatten in if h.msg_async then get_variants [(h, false); (h, true)] @ get_all_message_variants tail acc diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml index d0d981309da..aa3bf08c05a 100644 --- a/ocaml/xapi-cli-server/cli_operations.ml +++ b/ocaml/xapi-cli-server/cli_operations.ml @@ -1460,11 +1460,9 @@ let pool_ha_compute_vm_failover_plan printer rpc session_id params = in (* For now select all VMs resident on the given hosts *) let vms = - List.concat - (List.map - (fun host -> Client.Host.get_resident_VMs ~rpc ~session_id ~self:host) - hosts - ) + List.concat_map + (fun host -> Client.Host.get_resident_VMs ~rpc ~session_id ~self:host) + hosts in let vms = List.filter @@ -1590,32 +1588,26 @@ let pool_eject fd printer rpc session_id params = let pbds = Client.Host.get_PBDs ~rpc ~session_id ~self:host in (* Find the subset of SRs which cannot be seen from other hosts *) let srs = - List.concat - (List.map - (fun pbd -> - try - let sr = Client.PBD.get_SR ~rpc ~session_id ~self:pbd in - let other_pbds = - Client.SR.get_PBDs ~rpc ~session_id ~self:sr - in - let other_hosts = - List.map - (fun pbd -> - Client.PBD.get_host ~rpc ~session_id ~self:pbd - ) - other_pbds - in - let other_hosts_than_me = - List.filter (fun other -> other <> host) other_hosts - in - if other_hosts_than_me = [] then - [sr] - else - [] - with _ -> [] - ) - pbds + List.concat_map + (fun pbd -> + try + let sr = Client.PBD.get_SR ~rpc ~session_id ~self:pbd in + let other_pbds = Client.SR.get_PBDs ~rpc ~session_id ~self:sr in + let other_hosts = + List.map + (fun pbd -> Client.PBD.get_host ~rpc ~session_id ~self:pbd) + other_pbds + in + let other_hosts_than_me = + List.filter (fun other -> other <> host) other_hosts + in + if other_hosts_than_me = [] then + [sr] + else + [] + with _ -> [] ) + pbds in let warnings = ref [] in List.iter @@ -4169,25 +4161,23 @@ let vm_uninstall_common fd _printer rpc session_id params vms = in (* NB If a VDI is deleted then the VBD may be GCed at any time. 
*) let vdis = - List.concat - (List.map - (fun vbd -> - try - (* We only destroy VDIs where VBD.other_config contains 'owner' *) - let other_config = - Client.VBD.get_other_config ~rpc ~session_id ~self:vbd - in - let vdi = Client.VBD.get_VDI ~rpc ~session_id ~self:vbd in - (* Double-check the VDI actually exists *) - ignore (Client.VDI.get_uuid ~rpc ~session_id ~self:vdi) ; - if List.mem_assoc Constants.owner_key other_config then - [vdi] - else - [] - with _ -> [] - ) - vbds + List.concat_map + (fun vbd -> + try + (* We only destroy VDIs where VBD.other_config contains 'owner' *) + let other_config = + Client.VBD.get_other_config ~rpc ~session_id ~self:vbd + in + let vdi = Client.VBD.get_VDI ~rpc ~session_id ~self:vbd in + (* Double-check the VDI actually exists *) + ignore (Client.VDI.get_uuid ~rpc ~session_id ~self:vdi) ; + if List.mem_assoc Constants.owner_key other_config then + [vdi] + else + [] + with _ -> [] ) + vbds in let suspend_VDI = try @@ -4227,11 +4217,9 @@ let vm_uninstall fd printer rpc session_id params = do_vm_op printer rpc session_id (fun vm -> vm.getref ()) params [] in let snapshots = - List.flatten - (List.map - (fun vm -> Client.VM.get_snapshots ~rpc ~session_id ~self:vm) - vms - ) + List.concat_map + (fun vm -> Client.VM.get_snapshots ~rpc ~session_id ~self:vm) + vms in vm_uninstall_common fd printer rpc session_id params (vms @ snapshots) @@ -6070,11 +6058,9 @@ let cd_list printer rpc session_id params = srs in let cd_vdis = - List.flatten - (List.map - (fun (self, _) -> Client.SR.get_VDIs ~rpc ~session_id ~self) - cd_srs - ) + List.concat_map + (fun (self, _) -> Client.SR.get_VDIs ~rpc ~session_id ~self) + cd_srs in let table cd = let record = vdi_record rpc session_id cd in diff --git a/ocaml/xapi-idl/lib_test/idl_test_common.ml b/ocaml/xapi-idl/lib_test/idl_test_common.ml index 0e039037f3b..8e907f3b402 100644 --- a/ocaml/xapi-idl/lib_test/idl_test_common.ml +++ b/ocaml/xapi-idl/lib_test/idl_test_common.ml @@ -139,42 +139,34 @@ module GenTestData (C : CONFIG) (M : MARSHALLER) = struct match t.Param.name with | Some n -> inner - (List.flatten - (List.map - (fun marshalled -> - match (marshalled, t.Param.typedef.Rpc.Types.ty) with - | Rpc.Enum [], Rpc.Types.Option _ -> - params - | Rpc.Enum [x], Rpc.Types.Option _ -> - List.map - (fun (named, unnamed) -> - ((n, x) :: named, unnamed) - ) - params - | _, _ -> - List.map - (fun (named, unnamed) -> - ((n, marshalled) :: named, unnamed) - ) - params - ) - marshalled + (List.concat_map + (fun marshalled -> + match (marshalled, t.Param.typedef.Rpc.Types.ty) with + | Rpc.Enum [], Rpc.Types.Option _ -> + params + | Rpc.Enum [x], Rpc.Types.Option _ -> + List.map + (fun (named, unnamed) -> ((n, x) :: named, unnamed)) + params + | _, _ -> + List.map + (fun (named, unnamed) -> + ((n, marshalled) :: named, unnamed) + ) + params ) + marshalled ) f | None -> inner - (List.flatten - (List.map - (fun marshalled -> - List.map - (fun (named, unnamed) -> - (named, marshalled :: unnamed) - ) - params - ) - marshalled + (List.concat_map + (fun marshalled -> + List.map + (fun (named, unnamed) -> (named, marshalled :: unnamed)) + params ) + marshalled ) f ) diff --git a/ocaml/xapi/binpack.ml b/ocaml/xapi/binpack.ml index e89a775c749..14c0405bd7b 100644 --- a/ocaml/xapi/binpack.ml +++ b/ocaml/xapi/binpack.ml @@ -107,15 +107,13 @@ let rec permutations : 'a list -> 'a list list = | [] -> [[]] | x :: xs -> - List.concat - (List.map - (fun perm -> - List.map - (fun n -> insert_at n x perm) - (mkints_exclusive (List.length xs + 1)) - ) - 
(permutations xs) + List.concat_map + (fun perm -> + List.map + (fun n -> insert_at n x perm) + (mkints_exclusive (List.length xs + 1)) ) + (permutations xs) let rec factorial = function 0 -> 1L | x -> Int64.of_int x ** factorial (x - 1) diff --git a/ocaml/xapi/eventgen.ml b/ocaml/xapi/eventgen.ml index f03db1e9bed..46ffd833866 100644 --- a/ocaml/xapi/eventgen.ml +++ b/ocaml/xapi/eventgen.ml @@ -35,30 +35,26 @@ let compute_object_references_to_follow (obj_name : string) = let objs = Dm_api.objects_of_api api in let obj = List.find (fun obj -> obj.Datamodel_types.name = obj_name) objs in let relations = Dm_api.relations_of_api api in - let symmetric = - List.concat (List.map (fun (a, b) -> [(a, b); (b, a)]) relations) - in + let symmetric = List.concat_map (fun (a, b) -> [(a, b); (b, a)]) relations in let set = Xapi_stdext_std.Listext.List.setify symmetric in - List.concat - (List.map - (function - | { - Datamodel_types.ty= Datamodel_types.Ref _ - ; Datamodel_types.field_name - ; _ - } -> - let this_end = (obj.Datamodel_types.name, field_name) in - if List.mem_assoc this_end set then - let other_end = List.assoc this_end set in - let other_obj = fst other_end in - [(other_obj, field_name)] - else - [] - | _ -> - [] - ) - (Datamodel_utils.fields_of_obj obj) - ) + List.concat_map + (function + | { + Datamodel_types.ty= Datamodel_types.Ref _ + ; Datamodel_types.field_name + ; _ + } -> + let this_end = (obj.Datamodel_types.name, field_name) in + if List.mem_assoc this_end set then + let other_end = List.assoc this_end set in + let other_obj = fst other_end in + [(other_obj, field_name)] + else + [] + | _ -> + [] + ) + (Datamodel_utils.fields_of_obj obj) let obj_references_table : (string, (string * string) list) Hashtbl.t = Hashtbl.create 30 @@ -79,17 +75,15 @@ let follow_references (obj_name : string) = (** Compute a set of modify events but skip any for objects which were missing (must have been dangling references) *) let events_of_other_tbl_refs other_tbl_refs = - List.concat - (List.map - (fun (tbl, fld, x) -> - try [(tbl, fld, x ())] - with _ -> - (* Probably means the reference was dangling *) - warn "skipping event for dangling reference %s: %s" tbl fld ; - [] - ) - other_tbl_refs + List.concat_map + (fun (tbl, fld, x) -> + try [(tbl, fld, x ())] + with _ -> + (* Probably means the reference was dangling *) + warn "skipping event for dangling reference %s: %s" tbl fld ; + [] ) + other_tbl_refs open Xapi_database.Db_cache_types open Xapi_database.Db_action_helper diff --git a/ocaml/xapi/extauth_plugin_ADpbis.ml b/ocaml/xapi/extauth_plugin_ADpbis.ml index fc73c7b7cb6..0e9bd3e44f8 100644 --- a/ocaml/xapi/extauth_plugin_ADpbis.ml +++ b/ocaml/xapi/extauth_plugin_ADpbis.ml @@ -981,11 +981,9 @@ module AuthADlw : Auth_signature.AUTH_MODULE = struct with Not_found -> [] in let disabled_module_params = - List.concat - (List.map - (fun disabled_module -> ["--disable"; disabled_module]) - disabled_modules - ) + List.concat_map + (fun disabled_module -> ["--disable"; disabled_module]) + disabled_modules in (* we need to make sure that the user passed to domaijoin-cli command is in the UPN syntax (user@domain.com) *) let user = convert_nt_to_upn_username _user in diff --git a/ocaml/xapi/hashtbl_xml.ml b/ocaml/xapi/hashtbl_xml.ml index 1169c60ae59..b1a746adef3 100644 --- a/ocaml/xapi/hashtbl_xml.ml +++ b/ocaml/xapi/hashtbl_xml.ml @@ -52,11 +52,11 @@ let of_xml (input : Xmlm.input) = let el (tag : Xmlm.tag) acc = match tag with | (_, "config"), _ -> - List.flatten acc + List.concat acc | (_, "row"), 
attrs -> let key = List.assoc ("", "key") attrs in let value = List.assoc ("", "value") attrs in - (key, value) :: List.flatten acc + (key, value) :: List.concat acc | (ns, name), _ -> raise (Unmarshall_error (Printf.sprintf "Unknown tag: (%s,%s)" ns name)) in diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index cbbbdb1f078..7c4af7b0f4b 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -195,7 +195,7 @@ let map_with_drop ?(doc = "performing unknown operation") f xs = (ExnHelper.string_of_exn e) ; [] in - List.concat (List.map one xs) + List.concat_map one xs (* Iterate a function across a list, ignoring applications which throw an exception *) let iter_with_drop ?(doc = "performing unknown operation") f xs = diff --git a/ocaml/xapi/monitor_master.ml b/ocaml/xapi/monitor_master.ml index ffad86ccd6c..c1dff9b8433 100644 --- a/ocaml/xapi/monitor_master.ml +++ b/ocaml/xapi/monitor_master.ml @@ -170,8 +170,7 @@ let update_pifs ~__context host pifs = pifrec.API.pIF_tunnel_transport_PIF_of in (pifrec.API.pIF_network :: vlan_networks) @ tunnel_networks - |> List.map vifs_on_local_bridge - |> List.flatten + |> List.concat_map vifs_on_local_bridge |> List.iter set_carrier with e -> log_backtrace () ; diff --git a/ocaml/xapi/monitor_mem_host.ml b/ocaml/xapi/monitor_mem_host.ml index afddc5d0f78..e4c2f012a24 100644 --- a/ocaml/xapi/monitor_mem_host.ml +++ b/ocaml/xapi/monitor_mem_host.ml @@ -21,45 +21,41 @@ open D let get_changes rrd_files = let named_dss = - List.flatten - (List.map - (fun filename -> - try - let datasources = - Monitor_types.datasources_from_filename filename - in - Mcache.log_errors_from filename ; - datasources - |> List.filter_map (function - | Rrd.Host, ds - when List.mem ds.Ds.ds_name - ["memory_total_kib"; "memory_free_kib"] -> - Some ds - | _ -> - None (* we are only interested in Host memory stats *) - ) - |> List.map (function ds -> - let value = - match ds.Ds.ds_value with - | Rrd.VT_Int64 v -> - Memory.bytes_of_kib v - | Rrd.VT_Float v -> - Memory.bytes_of_kib (Int64.of_float v) - | Rrd.VT_Unknown -> - -1L - in - (ds.Ds.ds_name, value) - ) - with e -> - if not (Mcache.is_ignored filename) then ( - error "Unable to read host memory metrics from %s: %s" filename - (Printexc.to_string e) ; - Mcache.ignore_errors_from filename - ) ; - [] - ) - rrd_files + List.concat_map + (fun filename -> + try + let datasources = Monitor_types.datasources_from_filename filename in + Mcache.log_errors_from filename ; + datasources + |> List.filter_map (function + | Rrd.Host, ds + when List.mem ds.Ds.ds_name + ["memory_total_kib"; "memory_free_kib"] -> + Some ds + | _ -> + None (* we are only interested in Host memory stats *) + ) + |> List.map (function ds -> + let value = + match ds.Ds.ds_value with + | Rrd.VT_Int64 v -> + Memory.bytes_of_kib v + | Rrd.VT_Float v -> + Memory.bytes_of_kib (Int64.of_float v) + | Rrd.VT_Unknown -> + -1L + in + (ds.Ds.ds_name, value) + ) + with e -> + if not (Mcache.is_ignored filename) then ( + error "Unable to read host memory metrics from %s: %s" filename + (Printexc.to_string e) ; + Mcache.ignore_errors_from filename + ) ; + [] ) + rrd_files in let free_bytes = List.assoc_opt "memory_free_kib" named_dss in let total_bytes = List.assoc_opt "memory_total_kib" named_dss in diff --git a/ocaml/xapi/nm.ml b/ocaml/xapi/nm.ml index d2f121bd3f1..1483106ace5 100644 --- a/ocaml/xapi/nm.ml +++ b/ocaml/xapi/nm.ml @@ -105,8 +105,7 @@ let determine_ethtool_settings properties oc = in let settings = 
speed @ duplex @ autoneg @ advertise in let offload = - List.flatten - (List.map proc ["rx"; "tx"; "sg"; "tso"; "ufo"; "gso"; "gro"; "lro"]) + List.concat_map proc ["rx"; "tx"; "sg"; "tso"; "ufo"; "gso"; "gro"; "lro"] in (settings, offload) diff --git a/ocaml/xapi/repository.ml b/ocaml/xapi/repository.ml index d798246d0b0..dd123557a49 100644 --- a/ocaml/xapi/repository.ml +++ b/ocaml/xapi/repository.ml @@ -570,8 +570,7 @@ let get_pool_updates_in_json ~__context ~hosts = in let lps = updates_of_hosts - |> List.map (fun x -> x.HostUpdates.livepatches) - |> List.concat + |> List.concat_map (fun x -> x.HostUpdates.livepatches) |> LivePatchSet.of_list in let updateinfo_list = diff --git a/ocaml/xapi/storage_smapiv1.ml b/ocaml/xapi/storage_smapiv1.ml index d8bf2cdc203..bc5023006aa 100644 --- a/ocaml/xapi/storage_smapiv1.ml +++ b/ocaml/xapi/storage_smapiv1.ml @@ -1063,8 +1063,7 @@ module SMAPIv1 : Server_impl = struct explore 0 StringMap.empty vdi_rec.API.vDI_location |> invert |> IntMap.bindings - |> List.map snd - |> List.concat + |> List.concat_map snd in let vdi_recs = List.map (fun l -> StringMap.find l locations) vdis in (* We drop cbt_metadata VDIs that do not have any actual data *) diff --git a/ocaml/xapi/storage_smapiv1_wrapper.ml b/ocaml/xapi/storage_smapiv1_wrapper.ml index 7c5a6a97f43..469be6a53c1 100644 --- a/ocaml/xapi/storage_smapiv1_wrapper.ml +++ b/ocaml/xapi/storage_smapiv1_wrapper.ml @@ -1111,7 +1111,7 @@ functor let title = Printf.sprintf "SR %s" (s_of_sr sr) in title :: List.map indent (Sr.to_string_list sr_t) in - let srs = List.concat (List.map of_sr srs) in + let srs = List.concat_map of_sr srs in let errors = List.map Errors.to_string (Errors.list ()) in let errors = ( if errors <> [] then @@ -1298,7 +1298,7 @@ functor let detach_destroy_common context ~dbg ~sr f = let active_dps sr_t = (* Enumerate all active datapaths *) - List.concat (List.map (fun (_, vdi_t) -> Vdi.dp vdi_t) (Sr.list sr_t)) + List.concat_map (fun (_, vdi_t) -> Vdi.dp vdi_t) (Sr.list sr_t) in with_sr sr (fun () -> match Host.find sr !Host.host with diff --git a/ocaml/xapi/valid_ref_list.ml b/ocaml/xapi/valid_ref_list.ml index f192830c735..ef950dd062c 100644 --- a/ocaml/xapi/valid_ref_list.ml +++ b/ocaml/xapi/valid_ref_list.ml @@ -19,6 +19,6 @@ let map f = List.filter_map (default_on_missing_ref (fun x -> Some (f x)) None) let iter f = List.iter (default_on_missing_ref f ()) -let flat_map f l = List.map (default_on_missing_ref f []) l |> List.flatten +let flat_map f l = List.concat_map (default_on_missing_ref f []) l let filter_map f l = List.filter_map Fun.id (map f l) diff --git a/ocaml/xapi/xapi_bond.ml b/ocaml/xapi/xapi_bond.ml index 173a789ac2b..72d762ff193 100644 --- a/ocaml/xapi/xapi_bond.ml +++ b/ocaml/xapi/xapi_bond.ml @@ -79,8 +79,9 @@ let get_local_vifs ~__context host networks = (* Construct (VM -> VIFs) map for all VIFs on the given networks *) let vms_with_vifs = Hashtbl.create 10 in let all_vifs = - List.concat - (List.map (fun net -> Db.Network.get_VIFs ~__context ~self:net) networks) + List.concat_map + (fun net -> Db.Network.get_VIFs ~__context ~self:net) + networks in let add_vif vif = let vm = Db.VIF.get_VM ~__context ~self:vif in @@ -103,13 +104,9 @@ let get_local_vifs ~__context host networks = (* Make a list of the VIFs for local VMs *) let vms = Hashtbl.to_seq_keys vms_with_vifs |> List.of_seq in let local_vifs = - List.concat - (List.map - (fun vm -> - if is_local vm then Hashtbl.find_all vms_with_vifs vm else [] - ) - vms - ) + List.concat_map + (fun vm -> if is_local vm then 
Hashtbl.find_all vms_with_vifs vm else []) + vms in debug "Found these local VIFs: %s" (String.concat ", " @@ -231,18 +228,14 @@ let fix_bond ~__context ~bond = in let local_vifs = get_local_vifs ~__context host member_networks in let local_vlans = - List.concat - (List.map - (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) - members - ) + List.concat_map + (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) + members in let local_tunnels = - List.concat - (List.map - (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) - members - ) + List.concat_map + (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) + members in (* Move VLANs from members to master *) debug "Checking VLANs to move from slaves to master" ; @@ -356,18 +349,15 @@ let create ~__context ~network ~members ~mAC ~mode ~properties = in let local_vifs = get_local_vifs ~__context host member_networks in let local_vlans = - List.concat - (List.map - (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) - members - ) + List.concat_map + (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) + members in + let local_tunnels = - List.concat - (List.map - (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) - members - ) + List.concat_map + (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) + members in let is_management_on_vlan = List.filter diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index 9f21b4c43c4..ec6efe81d00 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -144,9 +144,10 @@ let get_required_cluster_stacks ~__context ~sr_sm_type = in let sms_matching_sr_type = Db.SM.get_records_where ~__context ~expr in sms_matching_sr_type - |> List.map (fun (_sm_ref, sm_rec) -> sm_rec.API.sM_required_cluster_stack) (* We assume that we only have one SM for each SR type, so this is only to satisfy type checking *) - |> List.flatten + |> List.concat_map (fun (_sm_ref, sm_rec) -> + sm_rec.API.sM_required_cluster_stack + ) let assert_cluster_stack_valid ~cluster_stack = if not (List.mem cluster_stack Constants.supported_smapiv3_cluster_stacks) diff --git a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml index bd13e808ec8..7de892cdf79 100644 --- a/ocaml/xapi/xapi_guest_agent.ml +++ b/ocaml/xapi/xapi_guest_agent.ml @@ -196,12 +196,12 @@ let networks path vif_type (list : string -> string list) = | [] -> path |> find_eths - |> List.map (fun (path, prefix) -> find_all_ips path prefix) - |> List.concat + |> List.concat_map (fun (path, prefix) -> find_all_ips path prefix) | vif_pair_list -> vif_pair_list - |> List.map (fun (vif_path, vif_id) -> find_all_vif_ips vif_path vif_id) - |> List.concat + |> List.concat_map (fun (vif_path, vif_id) -> + find_all_vif_ips vif_path vif_id + ) (* One key is placed in the other map per control/* key in xenstore. 
This catches keys like "feature-shutdown" "feature-hibernate" "feature-reboot" @@ -242,19 +242,17 @@ let get_initial_guest_metrics (lookup : string -> string option) let all_control = list "control" in let cant_suspend_reason = lookup "data/cant_suspend_reason" in let to_map kvpairs = - List.concat - (List.map - (fun (xskey, mapkey) -> - match (lookup xskey, xskey, cant_suspend_reason) with - | Some _, "control/feature-suspend", Some reason -> - [("data-cant-suspend-reason", reason)] - | Some xsval, _, _ -> - [(mapkey, xsval)] - | None, _, _ -> - [] - ) - kvpairs + List.concat_map + (fun (xskey, mapkey) -> + match (lookup xskey, xskey, cant_suspend_reason) with + | Some _, "control/feature-suspend", Some reason -> + [("data-cant-suspend-reason", reason)] + | Some xsval, _, _ -> + [(mapkey, xsval)] + | None, _, _ -> + [] ) + kvpairs in let get_tristate xskey = match lookup xskey with diff --git a/ocaml/xapi/xapi_ha_vm_failover.ml b/ocaml/xapi/xapi_ha_vm_failover.ml index c834e384251..322d30f7996 100644 --- a/ocaml/xapi/xapi_ha_vm_failover.ml +++ b/ocaml/xapi/xapi_ha_vm_failover.ml @@ -928,11 +928,9 @@ let compute_restart_plan ~__context ~all_protected_vms ~live_set actually running somewhere else (very strange semi-agile situation) then it will be counted as overhead there and plans will be made for it running on the host we choose. *) let pinned = - List.concat - (List.map - (host_of_non_agile_vm ~__context all_hosts_and_snapshots) - not_agile_vms - ) + List.concat_map + (host_of_non_agile_vm ~__context all_hosts_and_snapshots) + not_agile_vms in (* The restart plan for offline non-agile VMs is just the map VM -> pinned Host *) let non_agile_restart_plan = @@ -955,19 +953,15 @@ let compute_restart_plan ~__context ~all_protected_vms ~live_set in (* All these hosts are live and the VMs are running (or scheduled to be running): *) let agile_vm_placement = - List.concat - (List.map - (fun (vm, host) -> match host with Some h -> [(vm, h)] | _ -> []) - agile_vm_accounted_to_host - ) + List.concat_map + (fun (vm, host) -> match host with Some h -> [(vm, h)] | _ -> []) + agile_vm_accounted_to_host in (* These VMs are not running on any host (either in real life or only hypothetically) *) let agile_vm_failed = - List.concat - (List.map - (fun (vm, host) -> if host = None then [vm] else []) - agile_vm_accounted_to_host - ) + List.concat_map + (fun (vm, host) -> if host = None then [vm] else []) + agile_vm_accounted_to_host in let config = { diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index e8162430943..32139f79896 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -195,8 +195,7 @@ let assert_bacon_mode ~__context ~host = && Db.VM.get_is_control_domain ~__context ~self:vm ) (Db.VM.get_all ~__context) - |> List.map (fun self -> Db.VM.get_VBDs ~__context ~self) - |> List.flatten + |> List.concat_map (fun self -> Db.VM.get_VBDs ~__context ~self) |> List.filter (fun self -> Db.VBD.get_currently_attached ~__context ~self) in if control_domain_vbds <> [] then diff --git a/ocaml/xapi/xapi_host_helpers.ml b/ocaml/xapi/xapi_host_helpers.ml index beb3f2d13b0..eb707de3823 100644 --- a/ocaml/xapi/xapi_host_helpers.ml +++ b/ocaml/xapi/xapi_host_helpers.ml @@ -135,10 +135,9 @@ let valid_operations ~__context record _ref' = [List.hd plugged_clustered_srs |> Ref.string_of] [`shutdown; `reboot; `apply_updates] ; let recovering_tasks = - List.map + List.concat_map (fun sr -> Helpers.find_health_check_task ~__context ~sr) plugged_clustered_srs - |> List.concat in if 
recovering_tasks <> [] then set_errors Api_errors.clustered_sr_degraded diff --git a/ocaml/xapi/xapi_pbd.ml b/ocaml/xapi/xapi_pbd.ml index 67fc069c8df..7ba1fd8642d 100644 --- a/ocaml/xapi/xapi_pbd.ml +++ b/ocaml/xapi/xapi_pbd.ml @@ -76,7 +76,7 @@ let get_active_vdis_by_pbd ~__context ~self = Db.VM.get_records_where ~__context ~expr:(Eq (Field "resident_on", Literal (Ref.string_of host))) in - let vbds = List.flatten (List.map (fun (_, vmr) -> vmr.API.vM_VBDs) vms) in + let vbds = List.concat_map (fun (_, vmr) -> vmr.API.vM_VBDs) vms in let vbds_r = List.map (fun self -> Db.VBD.get_record_internal ~__context ~self) vbds in diff --git a/ocaml/xapi/xapi_pci.ml b/ocaml/xapi/xapi_pci.ml index 1ff5620cf58..7c805c7e9cf 100644 --- a/ocaml/xapi/xapi_pci.ml +++ b/ocaml/xapi/xapi_pci.ml @@ -240,7 +240,7 @@ let update_pcis ~__context = ) host_pcis in - let deps = List.flatten (List.map (fun pci -> pci.related) class_pcis) in + let deps = List.concat_map (fun pci -> pci.related) class_pcis in let deps = List.map (fun dep -> List.find (fun pci -> pci.address = dep) host_pcis) diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index ef79c86cad1..13b1d698714 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -686,16 +686,16 @@ let pre_join_checks ~__context ~rpc ~session_id ~force = try let my_nbdish = Db.Network.get_all ~__context - |> List.map (fun nwk -> Db.Network.get_purpose ~__context ~self:nwk) - |> List.flatten + |> List.concat_map (fun nwk -> + Db.Network.get_purpose ~__context ~self:nwk + ) |> List.find (function `nbd | `insecure_nbd -> true | _ -> false) in let remote_nbdish = Client.Network.get_all ~rpc ~session_id - |> List.map (fun nwk -> + |> List.concat_map (fun nwk -> Client.Network.get_purpose ~rpc ~session_id ~self:nwk ) - |> List.flatten |> List.find (function `nbd | `insecure_nbd -> true | _ -> false) in if remote_nbdish <> my_nbdish then @@ -2530,18 +2530,16 @@ let ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms = (String.concat "; " (List.map Ref.string_of live_hosts)) ; (* All failed_vms must be agile *) let errors = - List.concat - (List.map - (fun self -> - try - Agility.vm_assert_agile ~__context ~self ; - [(self, [("error_code", Api_errors.host_not_enough_free_memory)])] - (* default *) - with Api_errors.Server_error (code, _) -> - [(self, [("error_code", code)])] - ) - failed_vms + List.concat_map + (fun self -> + try + Agility.vm_assert_agile ~__context ~self ; + [(self, [("error_code", Api_errors.host_not_enough_free_memory)])] + (* default *) + with Api_errors.Server_error (code, _) -> + [(self, [("error_code", code)])] ) + failed_vms in let plan = List.map diff --git a/ocaml/xapi/xapi_pvs_server.ml b/ocaml/xapi/xapi_pvs_server.ml index dc6c5f59212..d1f5062f448 100644 --- a/ocaml/xapi/xapi_pvs_server.ml +++ b/ocaml/xapi/xapi_pvs_server.ml @@ -26,7 +26,7 @@ let introduce ~__context ~addresses ~first_port ~last_port ~site = addresses ; let current = Db.PVS_server.get_all_records ~__context in let current_addresses = - List.map (fun (_, r) -> r.API.pVS_server_addresses) current |> List.concat + List.concat_map (fun (_, r) -> r.API.pVS_server_addresses) current in let in_use = Listext.intersect addresses current_addresses in if in_use <> [] then diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index c5a370df137..f6b1cc260e7 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -247,13 +247,11 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table 
= let vbds = List.filter (fun vbd -> vbd <> _ref') vdi_record.Db_actions.vDI_VBDs in - List.concat - (List.map - (fun self -> - try [Db.VBD.get_record_internal ~__context ~self] with _ -> [] - ) - vbds + List.concat_map + (fun self -> + try [Db.VBD.get_record_internal ~__context ~self] with _ -> [] ) + vbds in let pointing_to_a_suspended_VM vbd = Db.VM.get_power_state ~__context ~self:vbd.Db_actions.vBD_VM diff --git a/ocaml/xapi/xapi_vgpu_type.ml b/ocaml/xapi/xapi_vgpu_type.ml index 9656aa8f959..f7d5e1eb408 100644 --- a/ocaml/xapi/xapi_vgpu_type.ml +++ b/ocaml/xapi/xapi_vgpu_type.ml @@ -508,7 +508,7 @@ module Vendor_nvidia = struct | E (n, _, _) as t when n = name -> [t] | E (_, _, ch) -> - List.map (find_by_name name) ch |> List.concat + List.concat_map (find_by_name name) ch | D _ -> [] diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index 0387dee1952..b7596bfbc67 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -1158,7 +1158,7 @@ let choose_host_for_vm_no_wlb ~__context ~vm ~snapshot = let validate_host = vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check:false in - List.flatten host_lists + List.concat host_lists |> Xapi_vm_placement.select_host __context vm validate_host (** choose_host_for_vm will use WLB as long as it is enabled and there @@ -1328,7 +1328,7 @@ let all_used_VBD_devices ~__context ~self = in all_devices @ all_devices2 in - List.concat (List.map possible_VBD_devices_of_string existing_devices) + List.concat_map possible_VBD_devices_of_string existing_devices let allowed_VBD_devices ~__context ~vm ~_type = let will_have_qemu = Helpers.will_have_qemu ~__context ~self:vm in diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index 1f4994fee6c..d35a6b98718 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -342,7 +342,7 @@ let infer_vgpu_map ~__context ?remote vm = else [(pf_device, pf ())] in - try Db.VM.get_VGPUs ~__context ~self:vm |> List.map f |> List.concat + try Db.VM.get_VGPUs ~__context ~self:vm |> List.concat_map f with e -> raise (VGPU_mapping (Printexc.to_string e)) ) | Some {rpc; session; _} -> ( @@ -370,10 +370,7 @@ let infer_vgpu_map ~__context ?remote vm = else [(pf_device, pf ())] in - try - XenAPI.VM.get_VGPUs ~rpc ~session_id ~self:vm - |> List.map f - |> List.concat + try XenAPI.VM.get_VGPUs ~rpc ~session_id ~self:vm |> List.concat_map f with e -> raise (VGPU_mapping (Printexc.to_string e)) ) @@ -1199,12 +1196,10 @@ let migrate_send' ~__context ~vm ~dest ~live:_ ~vdi_map ~vif_map ~vgpu_map let snapshots = Db.VM.get_snapshots ~__context ~self:vm in let vm_and_snapshots = vm :: snapshots in let snapshots_vbds = - List.flatten - (List.map (fun self -> Db.VM.get_VBDs ~__context ~self) snapshots) + List.concat_map (fun self -> Db.VM.get_VBDs ~__context ~self) snapshots in let snapshot_vifs = - List.flatten - (List.map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots) + List.concat_map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots in let is_intra_pool = try @@ -1838,8 +1833,7 @@ let assert_can_migrate ~__context ~vm ~dest ~live:_ ~vdi_map ~vif_map ~options let vifs = Db.VM.get_VIFs ~__context ~self:vm in let snapshots = Db.VM.get_snapshots ~__context ~self:vm in let snapshot_vifs = - List.flatten - (List.map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots) + List.concat_map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots in let vif_map = infer_vif_map ~__context (vifs @ snapshot_vifs) vif_map in try diff 
--git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index 0cfe9493d1a..9b8b73f145c 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -864,16 +864,14 @@ module MD = struct let pcis_of_vm ~__context (vmref, vm) = let vgpu_pcidevs = Vgpuops.list_pcis_for_passthrough ~__context ~vm:vmref in let devs = - List.flatten - (List.map (fun (_, dev) -> dev) (Pciops.sort_pcidevs vgpu_pcidevs)) + List.concat_map (fun (_, dev) -> dev) (Pciops.sort_pcidevs vgpu_pcidevs) in (* The 'unmanaged' PCI devices are in the other_config key: *) let other_pcidevs = Pciops.other_pcidevs_of_vm ~__context vm.API.vM_other_config in let unmanaged = - List.flatten - (List.map (fun (_, dev) -> dev) (Pciops.sort_pcidevs other_pcidevs)) + List.concat_map (fun (_, dev) -> dev) (Pciops.sort_pcidevs other_pcidevs) in let net_sriov_pcidevs = list_net_sriov_vf_pcis ~__context ~vm in let devs = devs @ net_sriov_pcidevs @ unmanaged in @@ -3000,14 +2998,13 @@ let resync_resident_on ~__context = in (* Get a list of VMs that the xenopsds know about with their xenopsd client *) let vms_in_xenopsds = - List.map + List.concat_map (fun queue_name -> let module Client = (val make_client queue_name : XENOPS) in let vms = Client.VM.list dbg () in List.map (fun (vm, state) -> ((vm.Vm.id, state), queue_name)) vms ) (all_known_xenopsds ()) - |> List.flatten in (* The list of VMs xenopsd knows about that (xapi knows about at all, xapi has no idea about at all) *) diff --git a/ocaml/xapi/xha_interface.ml b/ocaml/xapi/xha_interface.ml index 53be303e04c..e89d22978ab 100644 --- a/ocaml/xapi/xha_interface.ml +++ b/ocaml/xapi/xha_interface.ml @@ -172,36 +172,32 @@ module DaemonConfiguration = struct Xml.Element ( "parameters" , [] - , List.concat - (List.map int_parameter - [ - ("HeartbeatInterval", config.heart_beat_interval) - ; ("HeartbeatTimeout", config.heart_beat_timeout) - ; ("StateFileInterval", config.state_file_interval) - ; ("StateFileTimeout", config.state_file_timeout) - ; ( "HeartbeatWatchdogTimeout" - , config.heart_beat_watchdog_timeout - ) - ; ( "StateFileWatchdogTimeout" - , config.state_file_watchdog_timeout - ) - ; ("BootJoinTimeout", config.boot_join_timeout) - ; ("EnableJoinTimeout", config.enable_join_timeout) - ; ( "XapiHealthCheckInterval" - , config.xapi_healthcheck_interval - ) - ; ( "XapiHealthCheckTimeout" - , config.xapi_healthcheck_timeout - ) - ; ( "XapiRestartAttempts" - , config.xapi_restart_attempts - ) - ; ("XapiRestartTimeout", config.xapi_restart_timeout) - ; ( "XapiLicenseCheckTimeout" - , config.xapi_licensecheck_timeout - ) - ] - ) + , List.concat_map int_parameter + [ + ("HeartbeatInterval", config.heart_beat_interval) + ; ("HeartbeatTimeout", config.heart_beat_timeout) + ; ("StateFileInterval", config.state_file_interval) + ; ("StateFileTimeout", config.state_file_timeout) + ; ( "HeartbeatWatchdogTimeout" + , config.heart_beat_watchdog_timeout + ) + ; ( "StateFileWatchdogTimeout" + , config.state_file_watchdog_timeout + ) + ; ("BootJoinTimeout", config.boot_join_timeout) + ; ("EnableJoinTimeout", config.enable_join_timeout) + ; ( "XapiHealthCheckInterval" + , config.xapi_healthcheck_interval + ) + ; ( "XapiHealthCheckTimeout" + , config.xapi_healthcheck_timeout + ) + ; ("XapiRestartAttempts", config.xapi_restart_attempts) + ; ("XapiRestartTimeout", config.xapi_restart_timeout) + ; ( "XapiLicenseCheckTimeout" + , config.xapi_licensecheck_timeout + ) + ] ) ] ) diff --git a/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml b/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml index 
f6a9fa43646..5d445e0f7dc 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml @@ -9,20 +9,18 @@ open D let create_rras use_min_max = (* Create archives of type min, max and average and last *) Array.of_list - (List.flatten - (List.map - (fun (n, ns) -> - if ns > 1 && use_min_max then - [ - Rrd.rra_create Rrd.CF_Average n ns 1.0 - ; Rrd.rra_create Rrd.CF_Min n ns 1.0 - ; Rrd.rra_create Rrd.CF_Max n ns 1.0 - ] - else - [Rrd.rra_create Rrd.CF_Average n ns 0.5] - ) - timescales + (List.concat_map + (fun (n, ns) -> + if ns > 1 && use_min_max then + [ + Rrd.rra_create Rrd.CF_Average n ns 1.0 + ; Rrd.rra_create Rrd.CF_Min n ns 1.0 + ; Rrd.rra_create Rrd.CF_Max n ns 1.0 + ] + else + [Rrd.rra_create Rrd.CF_Average n ns 0.5] ) + timescales ) let step = 5L diff --git a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml index e09b4b52511..5b20dc77393 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml @@ -511,7 +511,7 @@ let do_monitor_write xc writers = let timestamp, domains, my_paused_vms = domain_snapshot xc in let tagged_dom0_stats = generate_all_dom0_stats xc timestamp domains in write_dom0_stats writers (Int64.of_float timestamp) tagged_dom0_stats ; - let dom0_stats = List.concat (List.map snd tagged_dom0_stats) in + let dom0_stats = List.concat_map snd tagged_dom0_stats in let plugins_stats = Rrdd_server.Plugin.read_stats () in let stats = List.rev_append plugins_stats dom0_stats in Rrdd_stats.print_snapshot () ; diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml b/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml index 1502a07f9fa..057d6e9dc47 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-iostat/rrdp_iostat.ml @@ -52,7 +52,7 @@ let update_vdi_to_vm_map () = (* Get VBDs for this domain *) let enoents = ref 0 in let vbds = - List.map + List.concat_map (fun base_path -> try let path = Printf.sprintf "%s/%d" base_path domid in @@ -75,7 +75,6 @@ let update_vdi_to_vm_map () = [] ) base_paths - |> List.flatten in if !enoents = List.length base_paths then @@ -103,7 +102,7 @@ let update_vdi_to_vm_map () = vbds ) domUs - |> List.flatten + |> List.concat ) with e -> D.error "Error while constructing VDI-to-VM map: %s" (Printexc.to_string e) ; @@ -1080,34 +1079,30 @@ let gen_metrics () = in (* Lookup the VM(s) for this VDI and associate with the RRD for those VM(s) *) let data_sources_vm_iostats = - List.flatten - (List.map - (fun ((_sr, vdi), iostats_value) -> - let create_metrics (vm, pos, _devid) = - let key_format key = Printf.sprintf "vbd_%s_%s" pos key in - Iostats_value.make_ds ~owner:(Rrd.VM vm) ~name:"VDI" ~key_format - iostats_value - in - let vms = list_all_assocs vdi vdi_to_vm in - List.map create_metrics vms - ) - sr_vdi_to_iostats_values + List.concat_map + (fun ((_sr, vdi), iostats_value) -> + let create_metrics (vm, pos, _devid) = + let key_format key = Printf.sprintf "vbd_%s_%s" pos key in + Iostats_value.make_ds ~owner:(Rrd.VM vm) ~name:"VDI" ~key_format + iostats_value + in + let vms = list_all_assocs vdi vdi_to_vm in + List.map create_metrics vms ) + sr_vdi_to_iostats_values in let data_sources_vm_stats = - List.flatten - (List.map - (fun ((_sr, vdi), stats_value) -> - let create_metrics (vm, pos, _devid) = - let key_format key = Printf.sprintf "vbd_%s_%s" pos key in - Stats_value.make_ds ~owner:(Rrd.VM vm) ~name:"VDI" ~key_format - stats_value - in - let vms = list_all_assocs vdi vdi_to_vm in - List.map create_metrics vms - ) - 
sr_vdi_to_stats_values + List.concat_map + (fun ((_sr, vdi), stats_value) -> + let create_metrics (vm, pos, _devid) = + let key_format key = Printf.sprintf "vbd_%s_%s" pos key in + Stats_value.make_ds ~owner:(Rrd.VM vm) ~name:"VDI" ~key_format + stats_value + in + let vms = list_all_assocs vdi vdi_to_vm in + List.map create_metrics vms ) + sr_vdi_to_stats_values in (* convert recent stats data to hashtbl for next iterator use *) @@ -1122,7 +1117,7 @@ let gen_metrics () = sr_vdi_to_last_stats_values := Some (to_hashtbl sr_vdi_to_stats) ; domid_devid_to_last_stats_blktap3 := Some domid_devid_to_stats_blktap3 ; - List.flatten + List.concat (data_sources_stats @ data_sources_iostats @ data_sources_vm_stats diff --git a/ocaml/xcp-rrdd/bin/rrdp-xenpm/rrdp_xenpm.ml b/ocaml/xcp-rrdd/bin/rrdp-xenpm/rrdp_xenpm.ml index 55c93ef7bfd..6ce1aeb525b 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-xenpm/rrdp_xenpm.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-xenpm/rrdp_xenpm.ml @@ -120,7 +120,7 @@ let generate_state_dss state_kind = (fun state_id time -> gen_pm_ds state_kind cpu_id state_id time) times ) - |> List.flatten + |> List.concat with _ -> [] let generate_cpu_averages () = diff --git a/ocaml/xenopsd/lib/xenops_server.ml b/ocaml/xenopsd/lib/xenops_server.ml index e65b929e1f4..669af5566a1 100644 --- a/ocaml/xenopsd/lib/xenops_server.ml +++ b/ocaml/xenopsd/lib/xenops_server.ml @@ -1004,12 +1004,10 @@ module Redirector = struct ) (Queues.tags queue) in - List.concat - (List.map one - (default.queues - :: parallel_queues.queues - :: List.map snd (StringMap.bindings !overrides) - ) + List.concat_map one + (default.queues + :: parallel_queues.queues + :: List.map snd (StringMap.bindings !overrides) ) ) end @@ -3057,7 +3055,7 @@ and perform_exn ?subtask ?result (op : operation) (t : Xenops_task.task_handle) | Vm.Softreboot -> [Atomic (VM_softreboot id)] in - let operations = List.concat (List.map operations_of_action actions) in + let operations = List.concat_map operations_of_action actions in List.iter (fun x -> perform_exn x t) operations ; VM_DB.signal id | PCI_check_state id -> diff --git a/ocaml/xenopsd/test/test_topology.ml b/ocaml/xenopsd/test/test_topology.ml index 79d0f79217d..1863f546321 100644 --- a/ocaml/xenopsd/test/test_topology.ml +++ b/ocaml/xenopsd/test/test_topology.ml @@ -79,9 +79,7 @@ let vm_access_costs host all_vms (vcpus, nodes, cpuset) = in D.debug "Costs: %s" (Fmt.to_to_string pp costs) ; let cpus = float @@ CPUSet.cardinal cpuset in - let nodes = - all_vms |> List.map (fun ((_, nodes), _) -> nodes) |> List.flatten - in + let nodes = all_vms |> List.concat_map (fun ((_, nodes), _) -> nodes) in {costs with average= costs.average /. 
cpus; nodes} let cost_not_worse ~default c = diff --git a/ocaml/xenopsd/xc/device.ml b/ocaml/xenopsd/xc/device.ml index 3f6da8152a6..235f6457875 100644 --- a/ocaml/xenopsd/xc/device.ml +++ b/ocaml/xenopsd/xc/device.ml @@ -1100,7 +1100,7 @@ module PCI = struct ) (* From - https://github.com/torvalds/linux/blob/v4.19/include/linux/pci.h#L76-L102 *) + https://github.com/torvalds/linux/blob/v4.19/include/linux/pci.h#L76-L102 *) (* same as libxl_internal: PROC_PCI_NUM_RESOURCES *) let _proc_pci_num_resources = 7 @@ -1112,7 +1112,7 @@ module PCI = struct let _xen_domctl_dev_rdm_relaxed = 1 (* XXX: we don't want to use the 'xl' command here because the "interface" - isn't considered as stable as the C API *) + isn't considered as stable as the C API *) let xl_pci cmd pcidevs domid = List.iter (fun dev -> @@ -1141,7 +1141,7 @@ module PCI = struct Printf.sprintf "%s/backend/pci/%d/0" be_path fe_domid (* Given a domid, return a list of [ X, (domain, bus, dev, func) ] where X - indicates the order in which the device was plugged. *) + indicates the order in which the device was plugged. *) let read_pcidir ~xs domid = let path = device_model_pci_device_path xs 0 domid in let prefix = "dev-" in @@ -1436,7 +1436,7 @@ module PCI = struct let nvidia_manage = "/usr/lib/nvidia/sriov-manage" (** [num_vfs devstr] returns the number of PCI VFs of [devstr] or 0 if - [devstr] is not an SRIOV device *) + [devstr] is not an SRIOV device *) let num_vfs devstr = let path = sysfs_devices // devstr // "sriov_numvfs" in try Some (Unixext.string_of_file path |> String.trim |> int_of_string) with @@ -1448,8 +1448,8 @@ module PCI = struct (Printexc.to_string exn) (** [vfs_of device] returns the PCI addresses of the virtual functions of PCI - [device]. We find each virtual function by looking at the virtfnX symlink - in [device]. *) + [device]. We find each virtual function by looking at the virtfnX symlink + in [device]. *) let vfs_of devstr = let virtfn n = let path = sysfs_devices // devstr // Printf.sprintf "virtfn%d" n in @@ -1466,8 +1466,8 @@ module PCI = struct [] (** [deactivate_nvidia_sriov devstr] deactivates SRIOV PCI VFs of [devstr] if - necessary. This needs to be called for NVidia GPUs before using [devstr] - as a pass-through GPU. *) + necessary. This needs to be called for NVidia GPUs before using [devstr] + as a pass-through GPU. *) let deactivate_nvidia_sriov devstr = let cmd = nvidia_manage in let args = ["-d"; devstr] in @@ -1916,7 +1916,7 @@ end = struct None (** query qemu for the serial console and write it to xenstore. Only write - path for a real console, not a file or socket path. CA-318579 *) + path for a real console, not a file or socket path. CA-318579 *) let update_xenstore ~xs domid = if not @@ Service.Qemu.is_running ~xs domid then internal_error "Qemu not running for domain %d (%s)" domid __LOC__ ; @@ -1934,12 +1934,12 @@ end let can_surprise_remove ~xs (x : device) = Generic.can_surprise_remove ~xs x (** Dm_Common contains the private Dm functions that are common between the qemu - profile backends. *) + profile backends. 
*) module Dm_Common = struct (* An example one: [/usr/lib/xen/bin/qemu-dm -d 39 -m 256 -boot cd -serial pty - -usb -usbdevice tablet -domain-name bee94ac1-8f97-42e0-bf77-5cb7a6b664ee - -net nic,vlan=1,macaddr=00:16:3E:76:CE:44,model=rtl8139 -net - tap,vlan=1,bridge=xenbr0 -vnc 39 -k en-us -vnclisten 127.0.0.1] *) + -usb -usbdevice tablet -domain-name bee94ac1-8f97-42e0-bf77-5cb7a6b664ee + -net nic,vlan=1,macaddr=00:16:3E:76:CE:44,model=rtl8139 -net + tap,vlan=1,bridge=xenbr0 -vnc 39 -k en-us -vnclisten 127.0.0.1] *) type usb_opt = Enabled of (string * int) list | Disabled @@ -2081,7 +2081,7 @@ module Dm_Common = struct let vga_type_opts x = let open Xenops_interface.Vgpu in (* We can match on the implementation details to detect the VCS - case. Don't pass -vgpu for a compute vGPU. *) + case. Don't pass -vgpu for a compute vGPU. *) match x with | Vgpu ({implementation= Nvidia {vclass= Some "Compute"; _}; _} :: _) -> ["-std-vga"] @@ -2099,7 +2099,7 @@ module Dm_Common = struct ; Int64.to_string gvt_g.fence_sz ] and priv_opt = ["-priv"] in - List.flatten [base_opts; priv_opt] + List.concat [base_opts; priv_opt] | Vgpu [{implementation= MxGPU _; _}] -> [] | Vgpu _ -> @@ -2136,7 +2136,7 @@ module Dm_Common = struct in let vnc_opt = ["-vnc"; vnc_arg] in let keymap_opt = match keymap with Some k -> ["-k"; k] | None -> [] in - List.flatten [unused_opt; vnc_opt; keymap_opt] + List.concat [unused_opt; vnc_opt; keymap_opt] in let disp_options, wait_for_port = match info.disp with @@ -2166,17 +2166,15 @@ module Dm_Common = struct ; (info.acpi |> function false -> [] | true -> ["-acpi"]) ; (restore |> function false -> [] | true -> ["-loadvm"; restorefile]) ; info.pci_emulations - |> List.map (fun pci -> ["-pciemulation"; pci]) - |> List.concat + |> List.concat_map (fun pci -> ["-pciemulation"; pci]) ; (info.pci_passthrough |> function false -> [] | true -> ["-priv"]) ; List.rev info.extras - |> List.map (function + |> List.concat_map (function | k, None -> ["-" ^ k] | k, Some v -> ["-" ^ k; v] ) - |> List.concat ; (info.monitor |> function None -> [] | Some x -> ["-monitor"; x]) ; ["-pidfile"; Service.Qemu.pidfile_path domid] ] @@ -2193,15 +2191,14 @@ module Dm_Common = struct let root = Device_common.xenops_domain_path in try (* NB: The response size of this directory call may exceed the default - payload size limit. However, we have an exception that allows oversized - packets. *) + payload size limit. However, we have an exception that allows oversized + packets. 
*) xs.Xs.directory root - |> List.map (fun domid -> + |> List.concat_map (fun domid -> let path = Printf.sprintf "%s/%s/device/vgpu" root domid in try List.map (fun x -> path ^ "/" ^ x) (xs.Xs.directory path) with Xs_protocol.Enoent _ -> [] ) - |> List.concat |> List.exists (fun vgpu -> try let path = Printf.sprintf "%s/pf" vgpu in @@ -2365,20 +2362,20 @@ module Backend = struct (** Common signature for all the profile backends *) module type Intf = sig (** Vgpu functions that use the dispatcher to choose between different - profile and device-model backends *) + profile and device-model backends *) module Vgpu : sig val device : index:int -> int option end (** Vbd functions that use the dispatcher to choose between different - profile backends *) + profile backends *) module Vbd : sig val qemu_media_change : xs:Ezxenstore_core.Xenstore.Xs.xsh -> device -> string -> string -> unit end (** Vcpu functions that use the dispatcher to choose between different - profile backends *) + profile backends *) module Vcpu : sig val add : xs:Ezxenstore_core.Xenstore.Xs.xsh -> devid:int -> int -> bool -> unit @@ -2393,17 +2390,17 @@ module Backend = struct end (** Dm functions that use the dispatcher to choose between different profile - backends *) + backends *) module Dm : sig val get_vnc_port : xs:Ezxenstore_core.Xenstore.Xs.xsh -> int -> Socket.t option (** [get_vnc_port xenstore domid] returns the dom0 tcp port in which the - vnc server for [domid] can be found *) + vnc server for [domid] can be found *) val assert_can_suspend : xs:Ezxenstore_core.Xenstore.Xs.xsh -> Xenctrl.domid -> unit (** [assert_can_suspend xenstore xc] checks whether suspending is - prevented by QEMU *) + prevented by QEMU *) val suspend : Xenops_task.task_handle @@ -2426,7 +2423,7 @@ module Backend = struct -> 'a -> Forkhelpers.pidty (** [init_daemon task path args domid xenstore ready_path timeout cancel] - returns a forkhelper pid after starting the qemu daemon in dom0 *) + returns a forkhelper pid after starting the qemu daemon in dom0 *) val stop : xs:Ezxenstore_core.Xenstore.Xs.xsh @@ -2444,7 +2441,7 @@ module Backend = struct -> int -> Dm_Common.qemu_args (** [cmdline_of_info xenstore info restore domid] creates the command line - arguments to pass to the qemu wrapper script *) + arguments to pass to the qemu wrapper script *) val after_suspend_image : xs:Ezxenstore_core.Xenstore.Xs.xsh @@ -2453,7 +2450,7 @@ module Backend = struct -> int -> unit (** [after_suspend_image xs qemu_domid domid] hook to execute actions - after the suspend image has been created *) + after the suspend image has been created *) val pci_assign_guest : xs:Ezxenstore_core.Xenstore.Xs.xsh @@ -2464,18 +2461,18 @@ module Backend = struct end (** Implementation of the backend common signature for the qemu-none (PV) - backend *) + backend *) module Qemu_none : Intf = struct module Vgpu = struct let device ~index:_ = None end (** Implementation of the Vbd functions that use the dispatcher for the - qemu-none backend *) + qemu-none backend *) module Vbd = struct let qemu_media_change = Vbd_Common.qemu_media_change end (** Implementation of the Vcpu functions that use the dispatcher for the - qemu-none backend *) + qemu-none backend *) module Vcpu = struct let add = Vcpu_Common.add @@ -2487,7 +2484,7 @@ module Backend = struct end (** Implementation of the Dm functions that use the dispatcher for the - qemu-none backend *) + qemu-none backend *) module Dm = struct let get_vnc_port ~xs domid = Dm_Common.get_vnc_port ~xs domid ~f:(fun () -> @@ -2525,7 
+2522,7 @@ module Backend = struct (* Backend.Qemu_none *) (** Implementation of the backend common signature for the - qemu-upstream-compat backend *) + qemu-upstream-compat backend *) module type Qemu_upstream_config = sig module NIC : sig val max_emulated : int @@ -2686,7 +2683,7 @@ module Backend = struct let extra_qemu_args ~nic_type = let mult xs ys = - List.map (fun x -> List.map (fun y -> x ^ "." ^ y) ys) xs |> List.concat + List.concat_map (fun x -> List.map (fun y -> x ^ "." ^ y) ys) xs in List.concat [ @@ -2696,8 +2693,7 @@ module Backend = struct ; mult ["piix3-ide-xen"; "piix3-usb-uhci"; nic_type] ["subvendor_id=0x5853"; "subsystem_id=0x0001"] - |> List.map (fun x -> ["-global"; x]) - |> List.concat + |> List.concat_map (fun x -> ["-global"; x]) ] end @@ -2846,11 +2842,11 @@ module Backend = struct let update_cant_suspend domid xs = let as_msg cmd = Qmp.(Success (Some __LOC__, cmd)) in (* changing this will cause fire_event_on_vm to get called, which will do - a VM.check_state, which will trigger a VM.stat from XAPI to update - migratable state *) + a VM.check_state, which will trigger a VM.stat from XAPI to update + migratable state *) let path = Dm_Common.cant_suspend_reason_path domid in (* This will raise QMP_Error if it can't do it, we catch it and update - xenstore. *) + xenstore. *) match qmp_send_cmd ~may_fail:true domid Qmp.Query_migratable with | Qmp.Unit -> debug "query-migratable precheck passed (domid=%d)" domid ; @@ -2984,7 +2980,7 @@ module Backend = struct module Vgpu = struct let device = DefaultConfig.VGPU.device end (** Implementation of the Vbd functions that use the dispatcher for the - qemu-upstream-compat backend *) + qemu-upstream-compat backend *) module Vbd = struct let cd_of devid = match @@ -3004,8 +3000,8 @@ module Backend = struct internal_error "unexpected disk for devid %d" devid (* parse NBD URI. We are not using the URI module because the - format is not compliant but used by qemu. Using sscanf instead - to recognise and parse the specific URI *) + format is not compliant but used by qemu. Using sscanf instead + to recognise and parse the specific URI *) let is_nbd str = try Scanf.sscanf str "nbd:unix:%s@:exportname=%s" (fun _ _ -> true) with _ -> false @@ -3101,7 +3097,7 @@ module Backend = struct (* Backend.Qemu_upstream_compat.Vbd *) (** Implementation of the Vcpu functions that use the dispatcher for the - qemu-upstream-compat backend *) + qemu-upstream-compat backend *) module Vcpu = struct let add = Vcpu_Common.add @@ -3110,7 +3106,7 @@ module Backend = struct let status = Vcpu_Common.status (* hot(un)plug vcpu using QMP, keeping backwards-compatible xenstored - mechanism *) + mechanism *) let set ~xs ~devid domid online = Vcpu_Common.set ~xs ~devid domid online ; match online with @@ -3156,7 +3152,7 @@ module Backend = struct end (** Implementation of the Dm functions that use the dispatcher for the - qemu-upstream-compat backend *) + qemu-upstream-compat backend *) module Dm = struct let get_vnc_port ~xs domid = Dm_Common.get_vnc_port ~xs domid ~f:(fun () -> @@ -3212,7 +3208,7 @@ module Backend = struct (fun () -> Unix.close save_fd) (* Wait for QEMU's event socket to appear. Connect to it to make sure it - is ready. *) + is ready. *) let wait_event_socket ~task ~name ~domid ~timeout = let finished = ref false in let timeout_ns = Int64.of_float (timeout *. 
1e9) in @@ -3296,10 +3292,9 @@ module Backend = struct | Dm_Common.Enabled devices -> let devs = devices - |> List.map (fun (x, y) -> + |> List.concat_map (fun (x, y) -> ["-device"; sprintf "usb-%s,port=%d" x y] ) - |> List.concat in "-usb" :: devs in @@ -3357,13 +3352,12 @@ module Backend = struct ) ; let qmp = ["libxl"; "event"] - |> List.map (fun x -> + |> List.concat_map (fun x -> [ "-qmp" ; sprintf "unix:/var/run/xen/qmp-%s-%d,server,nowait" x domid ] ) - |> List.concat in let pv_device addr = try @@ -3525,11 +3519,11 @@ module Backend = struct (* Backend.Qemu_upstream *) (** Implementation of the backend common signature for the qemu-upstream - backend *) + backend *) module Qemu_upstream_compat = Make_qemu_upstream (Config_qemu_upstream_compat) (** Until the stage 4 defined in the qemu upstream design is implemented, - qemu_upstream behaves as qemu_upstream_compat *) + qemu_upstream behaves as qemu_upstream_compat *) module Qemu_upstream = Qemu_upstream_compat module Qemu_upstream_uefi = Make_qemu_upstream (Config_qemu_upstream_uefi) @@ -3663,7 +3657,7 @@ module Dm = struct () (* the following functions depend on the functions above that use the qemu - backend Q *) + backend Q *) let start_vgpu ~xc:_ ~xs task ?(restore = false) domid vgpus vcpus profile = let open Xenops_interface.Vgpu in diff --git a/ocaml/xenopsd/xc/device_common.ml b/ocaml/xenopsd/xc/device_common.ml index 871628aeef5..89d105e0bfc 100644 --- a/ocaml/xenopsd/xc/device_common.ml +++ b/ocaml/xenopsd/xc/device_common.ml @@ -312,7 +312,7 @@ let parse_backend_link x = let readdir ~xs d = try xs.Xs.directory d with Xs_protocol.Enoent _ -> [] -let to_list ys = List.concat (List.map Option.to_list ys) +let to_list ys = List.concat_map Option.to_list ys let list_kinds ~xs dir = to_list (List.map parse_kind (readdir ~xs dir)) @@ -322,88 +322,79 @@ let list_kinds ~xs dir = to_list (List.map parse_kind (readdir ~xs dir)) let list_frontends ~xs ?for_devids domid = let frontend_dir = sprintf "/xenops/domain/%d/device" domid in let kinds = list_kinds ~xs frontend_dir in - List.concat - (List.map - (fun k -> - let dir = sprintf "%s/%s" frontend_dir (string_of_kind k) in - let devids = - match for_devids with - | None -> - to_list (List.map parse_int (readdir ~xs dir)) - | Some devids -> - (* check that any specified devids are present in frontend_dir *) - List.filter - (fun devid -> - try - ignore (xs.Xs.read (sprintf "%s/%d" dir devid)) ; - true - with _ -> false - ) - devids - in - to_list - (List.map + List.concat_map + (fun k -> + let dir = sprintf "%s/%s" frontend_dir (string_of_kind k) in + let devids = + match for_devids with + | None -> + to_list (List.map parse_int (readdir ~xs dir)) + | Some devids -> + (* check that any specified devids are present in frontend_dir *) + List.filter (fun devid -> - (* domain [domid] believes it has a frontend for device [devid] *) - let frontend = {domid; kind= k; devid} in try - let link = xs.Xs.read (sprintf "%s/%d/backend" dir devid) in - match parse_backend_link link with - | Some b -> - Some {backend= b; frontend} - | None -> - None - with _ -> None + ignore (xs.Xs.read (sprintf "%s/%d" dir devid)) ; + true + with _ -> false ) devids + in + to_list + (List.map + (fun devid -> + (* domain [domid] believes it has a frontend for device [devid] *) + let frontend = {domid; kind= k; devid} in + try + let link = xs.Xs.read (sprintf "%s/%d/backend" dir devid) in + match parse_backend_link link with + | Some b -> + Some {backend= b; frontend} + | None -> + None + with _ -> None ) - ) - kinds 
+ devids + ) ) + kinds (* NB: we only read data from the backend directory. Therefore this gives the "backend's point of view". *) let list_backends ~xs domid = let backend_dir = xs.Xs.getdomainpath domid ^ "/backend" in let kinds = list_kinds ~xs backend_dir in - List.concat - (List.map - (fun k -> - let dir = sprintf "%s/%s" backend_dir (string_of_kind k) in - let domids = to_list (List.map parse_int (readdir ~xs dir)) in - List.concat - (List.map - (fun frontend_domid -> - let dir = - sprintf "%s/%s/%d" backend_dir (string_of_kind k) - frontend_domid - in - let devids = to_list (List.map parse_int (readdir ~xs dir)) in - to_list - (List.map - (fun devid -> - (* domain [domid] believes it has a backend for - [frontend_domid] of type [k] with devid [devid] *) - let backend = {domid; kind= k; devid} in - try - let link = - xs.Xs.read (sprintf "%s/%d/frontend" dir devid) - in - match parse_frontend_link link with - | Some f -> - Some {backend; frontend= f} - | None -> - None - with _ -> None - ) - devids - ) - ) - domids - ) - ) - kinds + List.concat_map + (fun k -> + let dir = sprintf "%s/%s" backend_dir (string_of_kind k) in + let domids = to_list (List.map parse_int (readdir ~xs dir)) in + List.concat_map + (fun frontend_domid -> + let dir = + sprintf "%s/%s/%d" backend_dir (string_of_kind k) frontend_domid + in + let devids = to_list (List.map parse_int (readdir ~xs dir)) in + to_list + (List.map + (fun devid -> + (* domain [domid] believes it has a backend for + [frontend_domid] of type [k] with devid [devid] *) + let backend = {domid; kind= k; devid} in + try + let link = xs.Xs.read (sprintf "%s/%d/frontend" dir devid) in + match parse_frontend_link link with + | Some f -> + Some {backend; frontend= f} + | None -> + None + with _ -> None + ) + devids + ) + ) + domids ) + kinds (** Return a list of devices connecting two domains. Ignore those whose kind we don't recognise *) diff --git a/ocaml/xenopsd/xc/domain.ml b/ocaml/xenopsd/xc/domain.ml index dd3813ff6d9..7b31011aabe 100644 --- a/ocaml/xenopsd/xc/domain.ml +++ b/ocaml/xenopsd/xc/domain.ml @@ -1015,12 +1015,11 @@ let xenguest_args_pv ~domid ~store_port ~store_domid ~console_port let xenguest_args_pvh ~domid ~store_port ~store_domid ~console_port ~console_domid ~memory ~kernel ~cmdline ~modules = let module_args = - List.map + List.concat_map (fun (m, c) -> "-module" :: m :: (match c with Some x -> ["-cmdline"; x] | None -> []) ) modules - |> List.flatten in [ "-mode" diff --git a/ocaml/xenopsd/xc/xenops_server_xen.ml b/ocaml/xenopsd/xc/xenops_server_xen.ml index ee4524cf781..cc201d7f8a1 100644 --- a/ocaml/xenopsd/xc/xenops_server_xen.ml +++ b/ocaml/xenopsd/xc/xenops_server_xen.ml @@ -4222,7 +4222,7 @@ module VIF = struct ] ) srvs - |> List.flatten + |> List.concat in ("pvs-site", s) :: ("pvs-interface", iface) From 2f7deb36aca7b829d66dcb866e103b7033b781eb Mon Sep 17 00:00:00 2001 From: Colin James Date: Tue, 1 Oct 2024 16:25:08 +0100 Subject: [PATCH 082/141] Update .git-blame-ignore-revs Include hash for large-scale replacement of List.flatten and introductions of List.concat_map. 
Signed-off-by: Colin James --- .git-blame-ignore-revs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index d8259ca9cd8..06bd08f2e4a 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -37,3 +37,6 @@ f43c221ad556bc85870faebc3ce3c9d6e9c2efd8 # strip trailing whitespace 5a003f446391ca05ec791c38c69e93fb1e718e78 + +# prefer concat_map +f1a1ee1c0dc6e228921ebc9e1ac39c2740d649c5 From 450096d0166f1bd21e81dea6081e5804fcfbd3c9 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 1 Oct 2024 16:36:11 +0100 Subject: [PATCH 083/141] ocaml: fix packages for quicktest and xs-trace tests They were misatributed to xapi, when the binaries are built for other packages Signed-off-by: Pau Ruiz Safont --- dune-project | 2 ++ ocaml/quicktest/dune | 2 +- ocaml/xs-trace/test/dune | 2 +- xapi-tools.opam | 1 + xapi.opam | 1 + 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/dune-project b/dune-project index fc74adc8a6e..ddcceb428f4 100644 --- a/dune-project +++ b/dune-project @@ -304,6 +304,7 @@ ; 'xapi-tools' will have version ~dev, not 'master' like all the others ; because it is not in xs-opam yet rrd-transport + xapi-tracing-export (alcotest :with-test) (ppx_deriving_rpc :with-test) (qcheck-core :with-test) @@ -391,6 +392,7 @@ (xapi-stdext-zerocheck (= :version)) (xapi-test-utils :with-test) (xapi-tracing (= :version)) + (xapi-tracing-export (= :version)) (xapi-types (= :version)) xenctrl ; for quicktest xenstore_transport diff --git a/ocaml/quicktest/dune b/ocaml/quicktest/dune index 0ac6a171acd..ac0bc21c193 100644 --- a/ocaml/quicktest/dune +++ b/ocaml/quicktest/dune @@ -45,6 +45,6 @@ (rule (alias runtest) - (package xapi) + (package xapi-debug) (action (run ./quicktest.exe -skip-xapi -- list)) ) diff --git a/ocaml/xs-trace/test/dune b/ocaml/xs-trace/test/dune index d794381a742..06e45a36165 100644 --- a/ocaml/xs-trace/test/dune +++ b/ocaml/xs-trace/test/dune @@ -5,6 +5,6 @@ (rule (alias runtest) - (package xapi) + (package xapi-tools) (deps test-xs-trace.sh ../xs_trace.exe test-source.json test-source.ndjson test_xs_trace.exe) (action (run bash test-xs-trace.sh))) diff --git a/xapi-tools.opam b/xapi-tools.opam index ba0f73b479f..c897251fa55 100644 --- a/xapi-tools.opam +++ b/xapi-tools.opam @@ -28,6 +28,7 @@ depends: [ "xmlm" "yojson" "rrd-transport" + "xapi-tracing-export" "alcotest" {with-test} "ppx_deriving_rpc" {with-test} "qcheck-core" {with-test} diff --git a/xapi.opam b/xapi.opam index 3a850a4e359..098d8463442 100644 --- a/xapi.opam +++ b/xapi.opam @@ -84,6 +84,7 @@ depends: [ "xapi-stdext-zerocheck" {= version} "xapi-test-utils" {with-test} "xapi-tracing" {= version} + "xapi-tracing-export" {= version} "xapi-types" {= version} "xenctrl" "xenstore_transport" From 6d3edbbb2b045eee48097374c16125d6ebb571c6 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 2 Oct 2024 08:46:16 +0100 Subject: [PATCH 084/141] CA-399956 - xe autocompletion: Fix autocompletion for words with given prefix For unresolved reasons, 'compgen -W' would not work as intended occasionally: 'xe host-param-get uuid=xxx param-na' would complete to: 'xe host-param-get uuid=xxx param-name=' but typing: 'xe host-param-get uuid=xxx param-name=so' would not complete it to: 'xe host-param-get uuid=xxx param-name=software-version' It's unlikely to be a bug in Bash since I couldn't find anything in the changelog and upgrading the package did not fix it; nor could I reproduce it in a different program with the same inputs - it is likely that something in 
the environment breaks compgen. Switch to pure Bash string processing instead. Signed-off-by: Andrii Sultanov --- ocaml/xe-cli/bash-completion | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ocaml/xe-cli/bash-completion b/ocaml/xe-cli/bash-completion index 98df8be24fb..b4ba6127138 100644 --- a/ocaml/xe-cli/bash-completion +++ b/ocaml/xe-cli/bash-completion @@ -771,7 +771,12 @@ __preprocess_suggestions() wordlist=$( echo "$1" | \ sed -re 's/(^|[^\])((\\\\)*),,*/\1\2\n/g' -e 's/\\,/,/g' -e 's/\\\\/\\/g' | \ sed -e 's/ *$//') - compgen -W "$wordlist" "$prefix" + local IFS=$'\n' + for word in $wordlist; do + if [[ "$word" =~ ^$prefix.* ]]; then + echo "$word" + fi + done } # set_completions suggestions current_prefix description_cmd From 182ed4775479c68ae195ede2e11a4ee5797acfce Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Wed, 2 Oct 2024 10:11:17 +0000 Subject: [PATCH 085/141] CA-399963: Block VM.start_on is feature flag is disabled This was missed in cf8ba8b1f05c3490953f824c678528fba9a76bd2. Signed-off-by: Rob Hoes --- ocaml/xapi/message_forwarding.ml | 1 + 1 file changed, 1 insertion(+) diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index 7c4af7b0f4b..c85dc2cb025 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -1923,6 +1923,7 @@ functor let start_on ~__context ~vm ~host ~start_paused ~force = if Helpers.rolling_upgrade_in_progress ~__context then Helpers.assert_host_has_highest_version_in_pool ~__context ~host ; + Pool_features.assert_enabled ~__context ~f:Features.VM_start ; Xapi_vm_helpers.assert_matches_control_domain_affinity ~__context ~self:vm ~host ; (* Prevent VM start on a host that is evacuating *) From 1d42c05de14f1a3b4935e7bef1f957292112aa38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 2 Oct 2024 11:54:30 +0100 Subject: [PATCH 086/141] test(xapi_globs): add a unit test that xapi globs parsing works MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We had a bug that would not fail any unit tests, but XAPI would fail to start This reproduces the error now: ``` [invalid] compare: functional value Raised by primitive operation at Xapi_globs.other_options.(fun) in file "ocaml/xapi/xapi_globs.ml", line 1622, characters 17-59 Called from Xcp_service.Config_file.dump.(fun) in file "ocaml/xapi-idl/lib/xcp_service.ml", line 145, characters 34-46 Called from Stdlib__List.iter in file "list.ml", line 110, characters 12-15 Called from Xcp_service.configure_common in file "ocaml/xapi-idl/lib/xcp_service.ml", line 432, characters 2-30 Called from Xcp_service.configure in file "ocaml/xapi-idl/lib/xcp_service.ml", line 461, characters 4-259 Called from Alcotest_engine__Core.Make.protect_test.(fun) in file "src/alcotest-engine/core.ml", line 181, characters 17-23 Called from Alcotest_engine__Monad.Identity.catch in file "src/alcotest-engine/monad.ml", line 24, characters 31-35 ``` Also print the Failure message in Xcp_service.configure, otherwise it'd exit 1 without saying why if you supply invalid cmdline arguments. 
Signed-off-by: Edwin Török --- ocaml/tests/test_xapi_helpers.ml | 11 ++++++++++- ocaml/xapi-idl/lib/xcp_service.ml | 6 +++--- ocaml/xapi-idl/lib/xcp_service.mli | 3 ++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/ocaml/tests/test_xapi_helpers.ml b/ocaml/tests/test_xapi_helpers.ml index 172e5c6e6a1..587a0888f6b 100644 --- a/ocaml/tests/test_xapi_helpers.ml +++ b/ocaml/tests/test_xapi_helpers.ml @@ -40,6 +40,15 @@ let filtering_test = ) strings +let test_xapi_configure () = + Xcp_service.configure + ~argv:[|Sys.argv.(0)|] + ~options:Xapi_globs.all_options () + let () = Suite_init.harness_init () ; - Alcotest.run "Test XAPI Helpers suite" [("Test_xapi_helpers", filtering_test)] + Alcotest.run "Test XAPI Helpers suite" + [ + ("Test_xapi_helpers", filtering_test) + ; ("Test_xapi_configure", [("configure", `Quick, test_xapi_configure)]) + ] diff --git a/ocaml/xapi-idl/lib/xcp_service.ml b/ocaml/xapi-idl/lib/xcp_service.ml index 645b04d0864..a7683091323 100644 --- a/ocaml/xapi-idl/lib/xcp_service.ml +++ b/ocaml/xapi-idl/lib/xcp_service.ml @@ -456,15 +456,15 @@ let configure_common ~options ~resources arg_parse_fn = resources ; Sys.set_signal Sys.sigpipe Sys.Signal_ignore -let configure ?(options = []) ?(resources = []) () = +let configure ?(argv = Sys.argv) ?(options = []) ?(resources = []) () = try configure_common ~options ~resources (fun config_spec -> - Arg.parse + Arg.parse_argv argv (Arg.align (arg_spec config_spec)) (fun _ -> failwith "Invalid argument") (Printf.sprintf "Usage: %s [-config filename]" Sys.argv.(0)) ) - with Failure _ -> exit 1 + with Failure msg -> prerr_endline msg ; flush stderr ; exit 1 let configure2 ~name ~version ~doc ?(options = []) ?(resources = []) () = configure_common ~options ~resources @@ fun config_spec -> diff --git a/ocaml/xapi-idl/lib/xcp_service.mli b/ocaml/xapi-idl/lib/xcp_service.mli index 05196bc03a0..98f35bea528 100644 --- a/ocaml/xapi-idl/lib/xcp_service.mli +++ b/ocaml/xapi-idl/lib/xcp_service.mli @@ -28,7 +28,8 @@ type res = { ; perms: Unix.access_permission list } -val configure : ?options:opt list -> ?resources:res list -> unit -> unit +val configure : + ?argv:string array -> ?options:opt list -> ?resources:res list -> unit -> unit val configure2 : name:string From a3ab2287e91167c4fd5996f7ef62dd560c2fe921 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 2 Oct 2024 11:30:59 +0100 Subject: [PATCH 087/141] fix(xapi)): cannot compare functional values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: 6635a00d68e5 ("CP-49136: Introduce PRNG for generating non-secret UUIDs") Signed-off-by: Edwin Török --- ocaml/xapi/xapi_globs.ml | 2 +- quality-gate.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 1b0d7c9bdd5..29e404fa224 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1619,7 +1619,7 @@ let other_options = ; ( "use-prng-uuid-gen" (* eventually this'll be the default, except for Sessions *) , Arg.Unit (fun () -> Uuidx.make_default := Uuidx.make_uuid_fast) - , (fun () -> !Uuidx.make_default = Uuidx.make_uuid_fast |> string_of_bool) + , (fun () -> !Uuidx.make_default == Uuidx.make_uuid_fast |> string_of_bool) , "Use PRNG based UUID generator instead of CSPRNG" ) ; ( "reuse-pool-sessions" diff --git a/quality-gate.sh b/quality-gate.sh index 9c3d3c2b5f8..eb2d4daed90 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -40,7 +40,7 @@ mli-files () { } 
structural-equality () { - N=9 + N=10 EQ=$(git grep -r --count ' == ' -- '**/*.ml' ':!ocaml/sdk-gen/**/*.ml' | cut -d ':' -f 2 | paste -sd+ - | bc) if [ "$EQ" -eq "$N" ]; then echo "OK counted $EQ usages of ' == '" From 522c64b028733e7331dddc1ecc764e5a408646ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 2 Oct 2024 14:05:41 +0100 Subject: [PATCH 088/141] fix(ci): make rrdd-plugin.xenctrl internal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Edwin Török --- ocaml/xcp-rrdd/bin/rrdp-cpu/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-iostat/dune | 2 +- ocaml/xcp-rrdd/bin/rrdp-netdev/dune | 2 +- ocaml/xcp-rrdd/lib/plugin/dune | 1 - 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/dune b/ocaml/xcp-rrdd/bin/rrdp-cpu/dune index b654417bf0a..ced826c63a2 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-cpu/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/dune @@ -4,7 +4,7 @@ (libraries astring rrdd-plugin - rrdd-plugin.xenctrl + rrdd_plugin_xenctrl rrdd_plugins_libs xapi-idl.rrd xapi-log diff --git a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune index 3880709282a..03f7b00a5fc 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-iostat/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-iostat/dune @@ -10,7 +10,7 @@ mtime mtime.clock.os rrdd-plugin - rrdd-plugin.xenctrl + rrdd_plugin_xenctrl rrdd_plugins_libs str stringext diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/dune b/ocaml/xcp-rrdd/bin/rrdp-netdev/dune index 7c538027368..fe68c431ab4 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-netdev/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-netdev/dune @@ -4,7 +4,7 @@ (libraries astring rrdd-plugin - rrdd-plugin.xenctrl + rrdd_plugin_xenctrl rrdd_plugins_libs xapi-idl xapi-idl.network diff --git a/ocaml/xcp-rrdd/lib/plugin/dune b/ocaml/xcp-rrdd/lib/plugin/dune index b2370504780..b927bcc1614 100644 --- a/ocaml/xcp-rrdd/lib/plugin/dune +++ b/ocaml/xcp-rrdd/lib/plugin/dune @@ -24,7 +24,6 @@ (library (name rrdd_plugin_xenctrl) - (public_name rrdd-plugin.xenctrl) (flags (:standard -bin-annot)) (wrapped false) (modules xenctrl_lib) From 52d5d209e892184479d0d31fe0f296024de81c33 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 2 Oct 2024 11:22:23 +0100 Subject: [PATCH 089/141] opam: add configure step for xenopsd This is needed to generate the vif script and make it installable Signed-off-by: Pau Ruiz Safont --- xapi-tools.opam | 3 ++- xapi-tools.opam.template | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 xapi-tools.opam.template diff --git a/xapi-tools.opam b/xapi-tools.opam index c897251fa55..96b494cef2d 100644 --- a/xapi-tools.opam +++ b/xapi-tools.opam @@ -36,7 +36,9 @@ depends: [ "xenstore_transport" {with-test} "odoc" {with-doc} ] +dev-repo: "git+https://github.com/xapi-project/xen-api.git" build: [ + ["./configure"] ["dune" "subst"] {dev} [ "dune" @@ -50,4 +52,3 @@ build: [ "@doc" {with-doc} ] ] -dev-repo: "git+https://github.com/xapi-project/xen-api.git" diff --git a/xapi-tools.opam.template b/xapi-tools.opam.template new file mode 100644 index 00000000000..fe7e2cb0c27 --- /dev/null +++ b/xapi-tools.opam.template @@ -0,0 +1,15 @@ +build: [ + ["./configure"] + ["dune" "subst"] {dev} + [ + "dune" + "build" + "-p" + name + "-j" + jobs + "@install" + "@runtest" {with-test} + "@doc" {with-doc} + ] +] From 5d9d9a4d778c5facae639c973a8cc75dd5f7d097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 2 Oct 2024 14:21:18 +0100 Subject: [PATCH 090/141] 
fix(xcp_service.configure): do not fail on --help MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: 1d42c05de14f ("test(xapi_globs): add a unit test that xapi globs parsing works") Signed-off-by: Edwin Török --- ocaml/xapi-idl/lib/xcp_service.ml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi-idl/lib/xcp_service.ml b/ocaml/xapi-idl/lib/xcp_service.ml index a7683091323..01c65bc49fb 100644 --- a/ocaml/xapi-idl/lib/xcp_service.ml +++ b/ocaml/xapi-idl/lib/xcp_service.ml @@ -464,7 +464,13 @@ let configure ?(argv = Sys.argv) ?(options = []) ?(resources = []) () = (fun _ -> failwith "Invalid argument") (Printf.sprintf "Usage: %s [-config filename]" Sys.argv.(0)) ) - with Failure msg -> prerr_endline msg ; flush stderr ; exit 1 + with + | Failure msg -> + prerr_endline msg ; flush stderr ; exit 1 + | Arg.Bad msg -> + Printf.eprintf "%s" msg ; exit 2 + | Arg.Help msg -> + Printf.printf "%s" msg ; exit 0 let configure2 ~name ~version ~doc ?(options = []) ?(resources = []) () = configure_common ~options ~resources @@ fun config_spec -> From dd2240673087ca04318072bcaead1423ed15e5c3 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 2 Oct 2024 14:28:40 +0100 Subject: [PATCH 091/141] opam: add missing dependencies Signed-off-by: Pau Ruiz Safont --- dune-project | 7 +++++++ xapi-debug.opam | 5 +++++ xapi-stdext-unix.opam | 1 + xapi-tools.opam | 1 + 4 files changed, 14 insertions(+) diff --git a/dune-project b/dune-project index ddcceb428f4..36e6e4e5766 100644 --- a/dune-project +++ b/dune-project @@ -270,6 +270,11 @@ uutf x509 xapi-backtrace + xapi-log + xapi-types + xapi-stdext-pervasives + xapi-stdext-unix + xen-api-client xenctrl xenstore_transport xmlm @@ -305,6 +310,7 @@ ; because it is not in xs-opam yet rrd-transport xapi-tracing-export + xen-api-client (alcotest :with-test) (ppx_deriving_rpc :with-test) (qcheck-core :with-test) @@ -712,6 +718,7 @@ This package provides an Lwt compatible interface to the library.") astring base-unix (bisect_ppx :with-test) + (clock (and (= :version) :with-test)) (fd-send-recv (>= 2.0.0)) fmt integers diff --git a/xapi-debug.opam b/xapi-debug.opam index 5073a267be2..025e969e140 100644 --- a/xapi-debug.opam +++ b/xapi-debug.opam @@ -53,6 +53,11 @@ depends: [ "uutf" "x509" "xapi-backtrace" + "xapi-log" + "xapi-types" + "xapi-stdext-pervasives" + "xapi-stdext-unix" + "xen-api-client" "xenctrl" "xenstore_transport" "xmlm" diff --git a/xapi-stdext-unix.opam b/xapi-stdext-unix.opam index 41760ac6a8e..e41eefb9efa 100644 --- a/xapi-stdext-unix.opam +++ b/xapi-stdext-unix.opam @@ -13,6 +13,7 @@ depends: [ "astring" "base-unix" "bisect_ppx" {with-test} + "clock" {= version & with-test} "fd-send-recv" {>= "2.0.0"} "fmt" "integers" diff --git a/xapi-tools.opam b/xapi-tools.opam index 96b494cef2d..852102302dd 100644 --- a/xapi-tools.opam +++ b/xapi-tools.opam @@ -29,6 +29,7 @@ depends: [ "yojson" "rrd-transport" "xapi-tracing-export" + "xen-api-client" "alcotest" {with-test} "ppx_deriving_rpc" {with-test} "qcheck-core" {with-test} From 43d9ee9bb4fbb12bef9c64f0ac4d07ce1b2759b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Wed, 2 Oct 2024 14:53:27 +0100 Subject: [PATCH 092/141] fix(test): avoid running XAPI hooks in unit tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If you install the XAPI RPMs in your koji build environment (e.g. 
to build a package that depends on XAPI) then you couldn't build XAPI again anymore because its unit tests were failing. They were failing because they found some xapi hooks installed by the previous version of XAPI, whereas normally there'd be none when the unit tests are running. Disable running XAPI hooks during unit test, even if present we are not expected to run them. ``` [exception] Unix.Unix_error(Unix.ENOENT, "connect", "") Raised at Forkhelpers.execute_command_get_output_inner.(fun) in file "ocaml/forkexecd/lib/forkhelpers.ml", line 376, characters 10-19 Called from Xapi_stdext_pervasives__Pervasiveext.finally in file "ocaml/libs/xapi-stdext/lib/xapi-stdext-pervasives/pervasiveext.ml", line 24, characters 8-14 Re-raised at Xapi_stdext_pervasives__Pervasiveext.finally in file "ocaml/libs/xapi-stdext/lib/xapi-stdext-pervasives/pervasiveext.ml", line 39, characters 6-15 Called from Xapi_hooks.execute_hook.(fun) in file "ocaml/xapi/xapi_hooks.ml", line 77, characters 10-113 Called from Stdlib__Array.iter in file "array.ml", line 95, characters 31-48 Called from Xapi_host.destroy in file "ocaml/xapi/xapi_host.ml", line 1108, characters 2-98 Called from Dune__exe__Test_cluster_host.test_forget in file "ocaml/tests/test_cluster_host.ml", line 192, characters 2-42 Called from Alcotest_engine__Core.Make.protect_test.(fun) in file "src/alcotest-engine/core.ml", line 181, characters 17-23 Called from Alcotest_engine__Monad.Identity.catch in file "src/alcotest-engine/monad.ml", line 24, characters 31-35 ``` Signed-off-by: Edwin Török --- ocaml/tests/common/suite_init.ml | 1 + ocaml/xapi/xapi_hooks.ml | 69 +++++++++++++++++--------------- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/ocaml/tests/common/suite_init.ml b/ocaml/tests/common/suite_init.ml index 8012ff81986..e63deae17b5 100644 --- a/ocaml/tests/common/suite_init.ml +++ b/ocaml/tests/common/suite_init.ml @@ -1,5 +1,6 @@ let harness_init () = (* before any calls to XAPI code, to catch early uses of Unix.select *) + Atomic.set Xapi_hooks.in_test true ; Xapi_stdext_unix.Unixext.test_open 1024 ; Xapi_stdext_unix.Unixext.mkdir_safe Test_common.working_area 0o755 ; (* Alcotest hides the standard output of successful tests, diff --git a/ocaml/xapi/xapi_hooks.ml b/ocaml/xapi/xapi_hooks.ml index ecc1a258063..2f9edaff073 100644 --- a/ocaml/xapi/xapi_hooks.ml +++ b/ocaml/xapi/xapi_hooks.ml @@ -64,40 +64,43 @@ let list_individual_hooks ~script_name = ) else [||] +let in_test = Atomic.make false + let execute_hook ~__context ~script_name ~args ~reason = - let args = args @ ["-reason"; reason] in - let scripts = list_individual_hooks ~script_name in - let script_dir = Filename.concat !Xapi_globs.xapi_hooks_root script_name in - Array.iter - (fun script -> - try - debug "Executing hook '%s/%s' with args [ %s ]" script_name script - (String.concat "; " args) ; - let os, es = - Forkhelpers.execute_command_get_output - (Filename.concat script_dir script) - args - in - debug - "%s: Output of executing hook '%s/%s' with args [ %s ] is %s, err is \ - %s" - __FUNCTION__ script_name script (String.concat "; " args) os es - with - | Forkhelpers.Spawn_internal_error (_, stdout, Unix.WEXITED i) - (* i<>0 since that case does not generate exn *) - -> - if i = exitcode_log_and_continue then - debug "Hook '%s/%s' with args [ %s ] logged '%s'" script_name script - (String.concat "; " args) (String.escaped stdout) - else - raise - (Api_errors.Server_error - ( Api_errors.xapi_hook_failed - , [script_name ^ "/" ^ script; reason; stdout; 
string_of_int i] - ) - ) - ) - scripts + if not (Atomic.get in_test) then + let args = args @ ["-reason"; reason] in + let scripts = list_individual_hooks ~script_name in + let script_dir = Filename.concat !Xapi_globs.xapi_hooks_root script_name in + Array.iter + (fun script -> + try + debug "Executing hook '%s/%s' with args [ %s ]" script_name script + (String.concat "; " args) ; + let os, es = + Forkhelpers.execute_command_get_output + (Filename.concat script_dir script) + args + in + debug + "%s: Output of executing hook '%s/%s' with args [ %s ] is %s, err \ + is %s" + __FUNCTION__ script_name script (String.concat "; " args) os es + with + | Forkhelpers.Spawn_internal_error (_, stdout, Unix.WEXITED i) + (* i<>0 since that case does not generate exn *) + -> + if i = exitcode_log_and_continue then + debug "Hook '%s/%s' with args [ %s ] logged '%s'" script_name script + (String.concat "; " args) (String.escaped stdout) + else + raise + (Api_errors.Server_error + ( Api_errors.xapi_hook_failed + , [script_name ^ "/" ^ script; reason; stdout; string_of_int i] + ) + ) + ) + scripts let execute_vm_hook ~__context ~reason ~vm = let vmuuid = Db.VM.get_uuid ~__context ~self:vm in From 08661ddc7f632e5da5101a809115b9a9a1492db4 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 3 Oct 2024 08:57:39 +0100 Subject: [PATCH 093/141] xapi-storage-script: attach tests to the package Otherwise opam + dune will try to run them as part of all the packages in the repository. Signed-off-by: Pau Ruiz Safont --- ocaml/xapi-storage-script/dune | 1 + 1 file changed, 1 insertion(+) diff --git a/ocaml/xapi-storage-script/dune b/ocaml/xapi-storage-script/dune index 06e912ee9bb..fc41ae7e7dc 100644 --- a/ocaml/xapi-storage-script/dune +++ b/ocaml/xapi-storage-script/dune @@ -15,6 +15,7 @@ (test (name test_lib) (modules test_lib) + (package xapi-storage-script) (libraries alcotest alcotest-lwt lwt fmt private) ) From 863469a6c297c93cfa1194eb3adeef2b4e980341 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 4 Oct 2024 09:21:13 +0100 Subject: [PATCH 094/141] message-switch: remove dependency on async binaries The stresstest rule still contained references to async binaries. I haven't seen any other reference to async in the dune files Signed-off-by: Pau Ruiz Safont --- ocaml/message-switch/core_test/dune | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ocaml/message-switch/core_test/dune b/ocaml/message-switch/core_test/dune index a7f0396538d..92317ba71c3 100644 --- a/ocaml/message-switch/core_test/dune +++ b/ocaml/message-switch/core_test/dune @@ -5,7 +5,7 @@ server_unix_main lock_test_lwt ) - (modules + (modules client_unix_main server_unix_main lock_test_lwt @@ -48,8 +48,6 @@ (deps client_unix_main.exe server_unix_main.exe - async/client_async_main.exe - async/server_async_main.exe lwt/client_main.exe lwt/server_main.exe lwt/link_test_main.exe From f6097e6c1e317f547d02d984824e5f0a98b7c1a9 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 4 Oct 2024 11:32:49 +0100 Subject: [PATCH 095/141] xapi-stdect-unix: catch exceptions when testing the server Setting up the server for the tests sometimes fails to write the new state to stdout. Catch an exceptions while binding the socket to print them and make the issue more visible. 
Signed-off-by: Pau Ruiz Safont --- .../lib/xapi-stdext-unix/test/test_systemd.ml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/test_systemd.ml b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/test_systemd.ml index 39a0a94a153..f20daf454b9 100644 --- a/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/test_systemd.ml +++ b/ocaml/libs/xapi-stdext/lib/xapi-stdext-unix/test/test_systemd.ml @@ -1,4 +1,4 @@ -let _ = +let () = let module Daemon = Xapi_stdext_unix.Unixext.Daemon in let notify_test () = if Daemon.systemd_notify Daemon.State.Ready then @@ -22,15 +22,18 @@ let _ = ) else temp_path in - Unix.( - let sock = socket PF_UNIX SOCK_DGRAM 0 ~cloexec:true in - bind sock (ADDR_UNIX socket_path) ; + let sock = Unix.(socket PF_UNIX SOCK_DGRAM 0 ~cloexec:true) in + try + Unix.bind sock (Unix.ADDR_UNIX socket_path) ; let b = Bytes.create 1024 in - let i, _ = recvfrom sock b 0 1024 [] in + let i, _ = Unix.recvfrom sock b 0 1024 [] in print_endline (Bytes.sub_string b 0 i) ; - close sock - ) + Unix.close sock + with e -> + print_endline (Printexc.to_string e) ; + exit 5 in + let booted_test () = if Daemon.systemd_booted () then ( print_endline "Booted with systemd" ; From e3675a6b70a039243169b1f3e1469ceefafb26de Mon Sep 17 00:00:00 2001 From: Steven Woods Date: Thu, 3 Oct 2024 15:28:30 +0100 Subject: [PATCH 096/141] CP-51714: Remove noisy xenopsd debug logs Signed-off-by: Steven Woods --- ocaml/xapi/xapi_xenops.ml | 79 +++++++-------------------- ocaml/xenopsd/xc/xenops_server_xen.ml | 5 +- 2 files changed, 20 insertions(+), 64 deletions(-) diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index 9b8b73f145c..1d17bc5b768 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -1864,20 +1864,14 @@ let update_vm ~__context id = else let self = Db.VM.get_by_uuid ~__context ~uuid:id in let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self <> localhost then - debug "xenopsd event: ignoring event for VM (VM %s not resident)" id - else + if Db.VM.get_resident_on ~__context ~self = localhost then let previous = Xenops_cache.find_vm id in let dbg = Context.string_of_task_and_tracing __context in let module Client = (val make_client (queue_of_vm ~__context ~self) : XENOPS) in let info = try Some (Client.VM.stat dbg id) with _ -> None in - if Option.map snd info = previous then - debug - "xenopsd event: ignoring event for VM %s: metadata has not changed" - id - else ( + if Option.map snd info <> previous then ( debug "xenopsd event: processing event for VM %s" id ; if info = None then debug "xenopsd event: VM state missing: assuming VM has shut down" ; @@ -2438,27 +2432,19 @@ let update_vm ~__context id = let update_vbd ~__context (id : string * string) = try if Events_from_xenopsd.are_suppressed (fst id) then - debug "xenopsd event: ignoring event for VM (VM %s migrating away)" + debug "xenopsd event: ignoring event for VBD (VM %s migrating away)" (fst id) else let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost then - debug "xenopsd event: ignoring event for VBD (VM %s not resident)" - (fst id) - else + if Db.VM.get_resident_on ~__context ~self:vm = localhost then let previous = Xenops_cache.find_vbd id in let dbg = Context.string_of_task_and_tracing __context in let module Client = (val make_client (queue_of_vm ~__context ~self:vm) 
: XENOPS) in let info = try Some (Client.VBD.stat dbg id) with _ -> None in - if Option.map snd info = previous then - debug - "xenopsd event: ignoring event for VBD %s.%s: metadata has not \ - changed" - (fst id) (snd id) - else + if Option.map snd info <> previous then ( let vbds = Db.VM.get_VBDs ~__context ~self:vm in let vbdrs = List.map @@ -2541,6 +2527,7 @@ let update_vbd ~__context (id : string * string) = if not (Db.VBD.get_empty ~__context ~self:vbd) then let vdi = Db.VBD.get_VDI ~__context ~self:vbd in Xapi_vdi.update_allowed_operations ~__context ~self:vdi + ) with e -> error "xenopsd event: Caught %s while updating VBD" (string_of_exn e) @@ -2552,22 +2539,14 @@ let update_vif ~__context id = else let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost then - debug "xenopsd event: ignoring event for VIF (VM %s not resident)" - (fst id) - else + if Db.VM.get_resident_on ~__context ~self:vm = localhost then let previous = Xenops_cache.find_vif id in let dbg = Context.string_of_task_and_tracing __context in let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in let info = try Some (Client.VIF.stat dbg id) with _ -> None in - if Option.map snd info = previous then - debug - "xenopsd event: ignoring event for VIF %s.%s: metadata has not \ - changed" - (fst id) (snd id) - else + if Option.map snd info <> previous then ( let vifs = Db.VM.get_VIFs ~__context ~self:vm in let vifrs = List.map @@ -2656,6 +2635,7 @@ let update_vif ~__context id = info ; Xenops_cache.update_vif id (Option.map snd info) ; Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif + ) with e -> error "xenopsd event: Caught %s while updating VIF" (string_of_exn e) @@ -2667,22 +2647,14 @@ let update_pci ~__context id = else let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost then - debug "xenopsd event: ignoring event for PCI (VM %s not resident)" - (fst id) - else + if Db.VM.get_resident_on ~__context ~self:vm = localhost then let previous = Xenops_cache.find_pci id in let dbg = Context.string_of_task_and_tracing __context in let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in let info = try Some (Client.PCI.stat dbg id) with _ -> None in - if Option.map snd info = previous then - debug - "xenopsd event: ignoring event for PCI %s.%s: metadata has not \ - changed" - (fst id) (snd id) - else + if Option.map snd info <> previous then ( let pcis = Db.Host.get_PCIs ~__context ~self:localhost in let pcirs = List.map @@ -2731,6 +2703,7 @@ let update_pci ~__context id = ) info ; Xenops_cache.update_pci id (Option.map snd info) + ) with e -> error "xenopsd event: Caught %s while updating PCI" (string_of_exn e) @@ -2742,22 +2715,14 @@ let update_vgpu ~__context id = else let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost then - debug "xenopsd event: ignoring event for VGPU (VM %s not resident)" - (fst id) - else + if Db.VM.get_resident_on ~__context ~self:vm = localhost then let previous = Xenops_cache.find_vgpu id in let dbg = Context.string_of_task_and_tracing __context in let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in let info = try Some (Client.VGPU.stat dbg id) with _ -> None 
in - if Option.map snd info = previous then - debug - "xenopsd event: ignoring event for VGPU %s.%s: metadata has not \ - changed" - (fst id) (snd id) - else + if Option.map snd info <> previous then ( let vgpus = Db.VM.get_VGPUs ~__context ~self:vm in let vgpu_records = List.map @@ -2802,33 +2767,26 @@ let update_vgpu ~__context id = ) info ; Xenops_cache.update_vgpu id (Option.map snd info) + ) with e -> error "xenopsd event: Caught %s while updating VGPU" (string_of_exn e) let update_vusb ~__context (id : string * string) = try if Events_from_xenopsd.are_suppressed (fst id) then - debug "xenopsd event: ignoring event for VM (VM %s migrating away)" + debug "xenopsd event: ignoring event for VUSB (VM %s migrating away)" (fst id) else let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost then - debug "xenopsd event: ignoring event for VUSB (VM %s not resident)" - (fst id) - else + if Db.VM.get_resident_on ~__context ~self:vm = localhost then let previous = Xenops_cache.find_vusb id in let dbg = Context.string_of_task_and_tracing __context in let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in let info = try Some (Client.VUSB.stat dbg id) with _ -> None in - if Option.map snd info = previous then - debug - "xenopsd event: ignoring event for VUSB %s.%s: metadata has not \ - changed" - (fst id) (snd id) - else + if Option.map snd info <> previous then ( let pusb, _ = Db.VM.get_VUSBs ~__context ~self:vm |> List.map (fun self -> Db.VUSB.get_USB_group ~__context ~self) @@ -2853,6 +2811,7 @@ let update_vusb ~__context (id : string * string) = info ; Xenops_cache.update_vusb id (Option.map snd info) ; Xapi_vusb_helpers.update_allowed_operations ~__context ~self:vusb + ) with e -> error "xenopsd event: Caught %s while updating VUSB" (string_of_exn e) diff --git a/ocaml/xenopsd/xc/xenops_server_xen.ml b/ocaml/xenopsd/xc/xenops_server_xen.ml index cc201d7f8a1..d97ddede77b 100644 --- a/ocaml/xenopsd/xc/xenops_server_xen.ml +++ b/ocaml/xenopsd/xc/xenops_server_xen.ml @@ -2376,11 +2376,8 @@ module VM = struct | Dynamic.Vm id when id = vm.Vm.id -> debug "EVENT on our VM: %s" id ; Some () - | Dynamic.Vm id -> - debug "EVENT on other VM: %s" id ; - None | _ -> - debug "OTHER EVENT" ; None + None in let vm_has_shutdown () = on_domain task vm (fun _ _ _ _ di -> di.Xenctrl.shutdown) From 9d0949dbe1b40a45d24d541b1c90aa1d77373995 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 8 Oct 2024 13:31:10 +0100 Subject: [PATCH 097/141] maintenance: avoid deprecated bindings in uuidm 0.9.9 Signed-off-by: Pau Ruiz Safont --- ocaml/idl/ocaml_backend/gen_rbac.ml | 2 +- .../libs/ezxenstore/watch/ez_xenctrl_uuid.ml | 2 +- ocaml/libs/uuid/uuidx.ml | 6 ++--- ocaml/libs/vhd/vhd_format/f.ml | 27 ++++++++++--------- ocaml/libs/xapi-inventory/lib/inventory.ml | 8 ++++-- ocaml/tests/test_ref.ml | 3 ++- ocaml/xapi-guard/test/cache_test.ml | 6 ++++- ocaml/xapi-guard/test/xapi_guard_test.ml | 2 +- ocaml/xapi-idl/lib/uuidm_rpc_type.ml | 6 ++++- 9 files changed, 38 insertions(+), 24 deletions(-) diff --git a/ocaml/idl/ocaml_backend/gen_rbac.ml b/ocaml/idl/ocaml_backend/gen_rbac.ml index 64f8f4200ef..cda3d1f2f8d 100644 --- a/ocaml/idl/ocaml_backend/gen_rbac.ml +++ b/ocaml/idl/ocaml_backend/gen_rbac.ml @@ -57,7 +57,7 @@ let writer_csv static_permissions_roles = let hash2uuid str = let h = Digest.string str in - Option.map Uuidm.to_string (Uuidm.of_bytes h) + Option.map Uuidm.to_string 
(Uuidm.of_binary_string h) let replace_char str c1 c2 = let buf = Bytes.of_string str in diff --git a/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml b/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml index e255861d7d8..ab7794abb9c 100644 --- a/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml +++ b/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml @@ -21,7 +21,7 @@ let bytes_of_handle h = let uuid_of_handle h = let h' = bytes_of_handle h |> Bytes.to_string in - match Uuidm.of_bytes h' with + match Uuidm.of_binary_string h' with | Some x -> x | None -> diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index 98eefe1ab73..8fc44a47edd 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -98,15 +98,15 @@ let pp = Uuidm.pp let equal = Uuidm.equal -let of_bytes u = Uuidm.of_bytes ~pos:0 u +let of_bytes u = Uuidm.of_binary_string ~pos:0 u -let to_bytes = Uuidm.to_bytes +let to_bytes = Uuidm.to_binary_string let of_int_array arr = arr |> Array.to_seq |> Seq.map char_of_int |> String.of_seq |> of_bytes let to_int_array u = - Uuidm.to_bytes u |> String.to_seq |> Seq.map int_of_char |> Array.of_seq + to_bytes u |> String.to_seq |> Seq.map int_of_char |> Array.of_seq let of_string = Uuidm.of_string ~pos:0 diff --git a/ocaml/libs/vhd/vhd_format/f.ml b/ocaml/libs/vhd/vhd_format/f.ml index 66b3e2f788e..ac29cf8e8a4 100644 --- a/ocaml/libs/vhd/vhd_format/f.ml +++ b/ocaml/libs/vhd/vhd_format/f.ml @@ -100,12 +100,11 @@ let _mib_shift = 20 let _gib_shift = 30 -let blank_uuid = - match Uuidm.of_bytes (String.make 16 '\000') with - | Some x -> - x - | None -> - assert false (* never happens *) +let blank_uuid = Uuidm.nil + +let new_uuid () = + let random = Random.State.make_self_init () in + Uuidm.v4_gen random () module Feature = struct type t = Temporary @@ -394,7 +393,7 @@ module Footer = struct ?(creator_application = default_creator_application) ?(creator_version = default_creator_version) ?(creator_host_os = Host_OS.Other 0l) ~current_size - ?(original_size = current_size) ~disk_type ?(uid = Uuidm.v `V4) + ?(original_size = current_size) ~disk_type ?(uid = new_uuid ()) ?(saved_state = false) () = let geometry = Geometry.of_sectors Int64.(current_size lsr sector_shift) in let checksum = 0l in @@ -493,7 +492,7 @@ module Footer = struct set_footer_sectors buf t.geometry.Geometry.sectors ; set_footer_disk_type buf (Disk_type.to_int32 t.disk_type) ; set_footer_checksum buf 0l ; - set_footer_uid (Uuidm.to_bytes t.uid) 0 buf ; + set_footer_uid (Uuidm.to_binary_string t.uid) 0 buf ; set_footer_saved_state buf (if t.saved_state then 1 else 0) ; let remaining = Cstruct.shift buf sizeof_footer in for i = 0 to 426 do @@ -544,7 +543,7 @@ module Footer = struct Disk_type.of_int32 (get_footer_disk_type buf) >>= fun disk_type -> let checksum = get_footer_checksum buf in let bytes = copy_footer_uid buf in - ( match Uuidm.of_bytes bytes with + ( match Uuidm.of_binary_string bytes with | None -> R.error (Failure @@ -979,7 +978,9 @@ module Header = struct set_header_block_size buf (Int32.of_int (1 lsl (t.block_size_sectors_shift + sector_shift))) ; set_header_checksum buf 0l ; - set_header_parent_unique_id (Uuidm.to_bytes t.parent_unique_id) 0 buf ; + set_header_parent_unique_id + (Uuidm.to_binary_string t.parent_unique_id) + 0 buf ; set_header_parent_time_stamp buf t.parent_time_stamp ; set_header_reserved buf 0l ; for i = 0 to 511 do @@ -1074,7 +1075,7 @@ module Header = struct let block_size_sectors_shift = block_size_shift - sector_shift in let checksum = get_header_checksum buf in let bytes = 
copy_header_parent_unique_id buf in - ( match Uuidm.of_bytes bytes with + ( match Uuidm.of_binary_string bytes with | None -> R.error (Failure @@ -2141,7 +2142,7 @@ functor (* Assume the data is there, or will be written later *) return t - let create_dynamic ~filename ~size ?(uuid = Uuidm.v `V4) + let create_dynamic ~filename ~size ?(uuid = new_uuid ()) ?(saved_state = false) ?(features = []) () = (* The physical disk layout will be: byte 0 - 511: backup footer @@ -2212,7 +2213,7 @@ functor String.concat "/" (base @ target) let create_difference ~filename ~parent ?(relative_path = true) - ?(uuid = Uuidm.v `V4) ?(saved_state = false) ?(features = []) () = + ?(uuid = new_uuid ()) ?(saved_state = false) ?(features = []) () = (* We use the same basic file layout as in create_dynamic *) let data_offset = 512L in let table_offset = 2048L in diff --git a/ocaml/libs/xapi-inventory/lib/inventory.ml b/ocaml/libs/xapi-inventory/lib/inventory.ml index 867d4a2483e..88f8ddf9910 100644 --- a/ocaml/libs/xapi-inventory/lib/inventory.ml +++ b/ocaml/libs/xapi-inventory/lib/inventory.ml @@ -52,10 +52,14 @@ let inventory = Hashtbl.create 10 let inventory_m = Mutex.create () +let new_uuid () = + let random = Random.State.make_self_init () in + Uuidm.v4_gen random () + (* Compute the minimum necessary inventory file contents *) let minimum_default_entries () = - let host_uuid = Uuidm.to_string (Uuidm.v `V4) in - let dom0_uuid = Uuidm.to_string (Uuidm.v `V4) in + let host_uuid = Uuidm.to_string (new_uuid ()) in + let dom0_uuid = Uuidm.to_string (new_uuid ()) in [ (_installation_uuid, host_uuid) ; (_control_domain_uuid, dom0_uuid) diff --git a/ocaml/tests/test_ref.ml b/ocaml/tests/test_ref.ml index 7213e615e3f..ebf1fe72f42 100644 --- a/ocaml/tests/test_ref.ml +++ b/ocaml/tests/test_ref.ml @@ -3,7 +3,8 @@ let uuidm = Crowbar.( - map [bytes_fixed 16] @@ fun b -> b |> Uuidm.of_bytes ~pos:0 |> Option.get + map [bytes_fixed 16] @@ fun b -> + b |> Uuidm.of_binary_string ~pos:0 |> Option.get ) let ref_of_uuidm uuidm = diff --git a/ocaml/xapi-guard/test/cache_test.ml b/ocaml/xapi-guard/test/cache_test.ml index 3e51cab2c35..00235d543b7 100644 --- a/ocaml/xapi-guard/test/cache_test.ml +++ b/ocaml/xapi-guard/test/cache_test.ml @@ -156,6 +156,10 @@ let log_read (uuid, timestamp, key) = in Lwt_result.return "yes" +let new_uuid () = + let random = Random.State.make_self_init () in + Uuidm.v4_gen random () + let to_cache with_read_writes = let __FUN = __FUNCTION__ in let elapsed = Mtime_clock.counter () in @@ -180,7 +184,7 @@ let to_cache with_read_writes = let* () = Lwt.pause () in loop_and_stop f name uuid max sent in - let vms = List.init 4 (fun _ -> Uuidm.(v `V4)) in + let vms = List.init 4 (fun _ -> new_uuid ()) in List.concat [ diff --git a/ocaml/xapi-guard/test/xapi_guard_test.ml b/ocaml/xapi-guard/test/xapi_guard_test.ml index 5486f6b61d2..280d9f4d627 100644 --- a/ocaml/xapi-guard/test/xapi_guard_test.ml +++ b/ocaml/xapi-guard/test/xapi_guard_test.ml @@ -60,7 +60,7 @@ let xapi_rpc call = | _ -> Fmt.failwith "XAPI RPC call %s not expected in test" call.Rpc.name -let vm_uuid = Uuidm.v `V4 +let vm_uuid = Uuidm.v4_gen (Random.State.make_self_init ()) () let vm_uuid_str = Uuidm.to_string vm_uuid diff --git a/ocaml/xapi-idl/lib/uuidm_rpc_type.ml b/ocaml/xapi-idl/lib/uuidm_rpc_type.ml index 24a93fa13b6..51eef3c2eab 100644 --- a/ocaml/xapi-idl/lib/uuidm_rpc_type.ml +++ b/ocaml/xapi-idl/lib/uuidm_rpc_type.ml @@ -1,3 +1,7 @@ +let new_uuid () = + let random = Random.State.make_self_init () in + Uuidm.v4_gen random () + module 
Uuidm = struct include Uuidm @@ -6,7 +10,7 @@ module Uuidm = struct Rpc.Types.Abstract { aname= "uuid" - ; test_data= [Uuidm.v4_gen (Random.get_state ()) ()] + ; test_data= [new_uuid ()] ; rpc_of= (fun t -> Rpc.String (Uuidm.to_string t)) ; of_rpc= (function From 89e7cd14b2cdf7126644a8da17ca10147f6a09d8 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 8 Oct 2024 13:34:45 +0100 Subject: [PATCH 098/141] ezxenstore: avoid copies when converting to and from uuids Signed-off-by: Pau Ruiz Safont --- ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml b/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml index ab7794abb9c..ad326ac2300 100644 --- a/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml +++ b/ocaml/libs/ezxenstore/watch/ez_xenctrl_uuid.ml @@ -12,15 +12,8 @@ * GNU Lesser General Public License for more details. *) -let bytes_of_handle h = - let s = Bytes.make 16 '\000' in - for i = 0 to 15 do - Bytes.set s i (char_of_int h.(i)) - done ; - s - let uuid_of_handle h = - let h' = bytes_of_handle h |> Bytes.to_string in + let h' = String.init 16 (fun i -> char_of_int h.(i)) in match Uuidm.of_binary_string h' with | Some x -> x @@ -28,9 +21,5 @@ let uuid_of_handle h = failwith (Printf.sprintf "VM handle '%s' is an invalid uuid" h') let handle_of_uuid u = - let s = Uuidm.to_bytes u in - let h = Array.make 16 0 in - for i = 0 to 15 do - h.(i) <- int_of_char s.[i] - done ; - h + let s = Uuidm.to_binary_string u in + Array.init 16 (fun i -> int_of_char s.[i]) From 1a6cb7ee36e4052136c83bb5823d24c2a4c8bb70 Mon Sep 17 00:00:00 2001 From: Lin Liu Date: Sun, 29 Sep 2024 03:18:49 +0000 Subject: [PATCH 099/141] CP-50603: Replace `c_rehash` with `openssl rehash` sub command `openssl rehash` sub command provides the same functionality for `c_rehash`, but quicker We replace `c_rehash` with `openssl rehash` in XS9, However, we will keep `c_rehash` compatbilility for XS8 stream Signed-off-by: Lin Liu --- ocaml/xapi/certificates.ml | 10 +++++++++- ocaml/xapi/xapi_globs.ml | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi/certificates.ml b/ocaml/xapi/certificates.ml index 6e1c01b7be6..2ae9e72aebe 100644 --- a/ocaml/xapi/certificates.ml +++ b/ocaml/xapi/certificates.ml @@ -51,7 +51,15 @@ let library_filename kind name = Filename.concat (library_path kind) name let mkdir_cert_path kind = Unixext.mkdir_rec (library_path kind) 0o700 let rehash' path = - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.c_rehash [path]) + match Sys.file_exists !Xapi_globs.c_rehash with + | true -> + Forkhelpers.execute_command_get_output !Xapi_globs.c_rehash [path] + |> ignore + | false -> + (* c_rehash will be replaced with openssl sub-command in newer version *) + Forkhelpers.execute_command_get_output !Constants.openssl_path + ["rehash"; path] + |> ignore let rehash () = mkdir_cert_path CA_Certificate ; diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index d23d7ec4ce6..9a461a4e7bb 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -768,7 +768,7 @@ let server_cert_group_id = ref (-1) let server_cert_internal_path = ref (Filename.concat "/etc/xensource" "xapi-pool-tls.pem") -let c_rehash = ref "c_rehash" +let c_rehash = ref "/usr/bin/c_rehash" let trusted_certs_dir = ref "/etc/stunnel/certs" @@ -1714,7 +1714,6 @@ module Resources = struct ; ("createrepo-cmd", createrepo_cmd, "Path to createrepo command") ; ("modifyrepo-cmd", 
modifyrepo_cmd, "Path to modifyrepo command") ; ("rpm-cmd", rpm_cmd, "Path to rpm command") - ; ("c_rehash", c_rehash, "Path to Regenerate CA store") ] let nonessential_executables = @@ -1795,6 +1794,7 @@ module Resources = struct , yum_config_manager_cmd , "Path to yum-config-manager command" ) + ; ("c_rehash", c_rehash, "Path to regenerate CA store") ] let essential_files = From 686751fbfc423b1c5ab9e9797b83387cad79fd15 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 8 Oct 2024 08:22:47 +0100 Subject: [PATCH 100/141] CA-400124: rrd: Serialize transform parameter for data sources Previously, the transform function was not serialized in the plugin-server protocol (it was only used in xcp_rrdd itself, not in plugins). This issue was revealed by the previous work around splitting cpu metrics into a separate plugin. Instead of allowing arbitrary functions (which would be difficult to serialize), for 'fun x' offer just two options: * Inverse (1.0 - x), and * Identity (x) Default (if the parameter is not provided, either in OCaml or JSON), is Identity. Signed-off-by: Andrii Sultanov --- ocaml/libs/xapi-rrd/lib/rrd.ml | 18 +++++++++++++++--- ocaml/libs/xapi-rrd/lib_test/crowbar_tests.ml | 2 +- ocaml/libs/xapi-rrd/lib_test/unit_tests.ml | 10 +++++----- ocaml/xapi-idl/rrd/ds.ml | 4 ++-- ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml | 8 -------- ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml | 7 ++----- ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml | 10 ++++++++++ .../lib/transport/base/rrd_protocol_v2.ml | 7 ++++++- ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.ml | 10 ++++++++++ ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.mli | 2 ++ ocaml/xenopsd/xc/mem_stats.ml | 3 ++- 11 files changed, 55 insertions(+), 26 deletions(-) diff --git a/ocaml/libs/xapi-rrd/lib/rrd.ml b/ocaml/libs/xapi-rrd/lib/rrd.ml index 3c2f8d707a8..0b67cc9efc5 100644 --- a/ocaml/libs/xapi-rrd/lib/rrd.ml +++ b/ocaml/libs/xapi-rrd/lib/rrd.ml @@ -22,6 +22,12 @@ exception No_RRA_Available exception Invalid_data_source of string +(** Inverse is (fun x -> 1.0 - x) *) +type ds_transform_function = Inverse | Identity + +let apply_transform_function f x = + match f with Inverse -> 1.0 -. x | Identity -> x + type ds_owner = VM of string | Host | SR of string (** Data source types - see ds datatype *) @@ -84,6 +90,12 @@ let ds_value_to_string = function | _ -> "0.0" +let ds_transform_function_to_string = function + | Inverse -> + "inverse" + | Identity -> + "identity" + (** The CDP preparation scratch area. The 'value' field should be accumulated in such a way that it always contains the value that will eventually be the CDP. 
This means that @@ -417,7 +429,7 @@ let ds_update rrd timestamp values transforms new_domid = ) in (* Apply the transform after the raw value has been calculated *) - let raw = transforms.(i) raw in + let raw = apply_transform_function transforms.(i) raw in (* Make sure the values are not out of bounds after all the processing *) if raw < ds.ds_min || raw > ds.ds_max then nan @@ -450,7 +462,7 @@ let ds_update_named rrd timestamp ~new_domid valuesandtransforms = valuesandtransforms |> List.to_seq |> StringMap.of_seq in let get_value_and_transform {ds_name; _} = - Option.value ~default:(VT_Unknown, Fun.id) + Option.value ~default:(VT_Unknown, Identity) (StringMap.find_opt ds_name valuesandtransforms) in let ds_values, ds_transforms = @@ -519,7 +531,7 @@ let rrd_create dss rras timestep inittime = } in let values = Array.map (fun ds -> ds.ds_last) dss in - let transforms = Array.make (Array.length values) (fun x -> x) in + let transforms = Array.make (Array.length values) Identity in ds_update rrd inittime values transforms true ; rrd diff --git a/ocaml/libs/xapi-rrd/lib_test/crowbar_tests.ml b/ocaml/libs/xapi-rrd/lib_test/crowbar_tests.ml index d3f01762d29..6ff917eccfc 100644 --- a/ocaml/libs/xapi-rrd/lib_test/crowbar_tests.ml +++ b/ocaml/libs/xapi-rrd/lib_test/crowbar_tests.ml @@ -81,7 +81,7 @@ let rrd = List.iteri (fun i v -> let t = 5. *. (init_time +. float_of_int i) in - ds_update rrd t [|VT_Int64 v|] [|Fun.id|] (i = 0) + ds_update rrd t [|VT_Int64 v|] [|Identity|] (i = 0) ) values ; rrd diff --git a/ocaml/libs/xapi-rrd/lib_test/unit_tests.ml b/ocaml/libs/xapi-rrd/lib_test/unit_tests.ml index b48ebf17688..089d8047468 100644 --- a/ocaml/libs/xapi-rrd/lib_test/unit_tests.ml +++ b/ocaml/libs/xapi-rrd/lib_test/unit_tests.ml @@ -131,7 +131,7 @@ let gauge_rrd = let rrd = rrd_create [|ds; ds2; ds3; ds4|] [|rra; rra2; rra3; rra4|] 1L 1000000000.0 in - let id x = x in + let id = Identity in for i = 1 to 100000 do let t = 1000000000.0 +. (0.7 *. float_of_int i) in let v1 = VT_Float (0.5 +. (0.5 *. sin (t /. 10.0))) in @@ -159,7 +159,7 @@ let _deserialize_verify_rrd = let rrd = rrd_create [|ds|] [|rra1; rra2; rra3|] 5L init_time in - let id x = x in + let id = Identity in for i = 1 to 100 do let t = init_time +. float_of_int i in let t64 = Int64.of_float t in @@ -178,7 +178,7 @@ let ca_322008_rrd = let rrd = rrd_create [|ds|] [|rra1; rra2; rra3|] 5L init_time in - let id x = x in + let id = Identity in for i = 1 to 100000 do let t = init_time +. float_of_int i in @@ -198,7 +198,7 @@ let ca_329043_rrd_1 = let rrd = rrd_create [|ds|] [|rra1; rra2; rra3|] 5L init_time in - let id x = x in + let id = Identity in let time_value_of_i i = let t = 5. *. (init_time +. 
float_of_int i) in @@ -228,7 +228,7 @@ let create_rrd ?(rows = 2) values min max = rrd_create [|ds1; ds2; ds3|] [|rra1; rra2; rra3; rra4|] 5L init_time in - let id x = x in + let id = Identity in List.iteri (fun i v -> diff --git a/ocaml/xapi-idl/rrd/ds.ml b/ocaml/xapi-idl/rrd/ds.ml index 620ba3fcc0c..0aef7dd5884 100644 --- a/ocaml/xapi-idl/rrd/ds.ml +++ b/ocaml/xapi-idl/rrd/ds.ml @@ -25,11 +25,11 @@ type ds = { ; ds_min: float ; ds_max: float ; ds_units: string - ; ds_pdp_transform_function: float -> float + ; ds_pdp_transform_function: Rrd.ds_transform_function } let ds_make ~name ~description ~value ~ty ~default ~units ?(min = neg_infinity) - ?(max = infinity) ?(transform = fun x -> x) () = + ?(max = infinity) ?(transform = Rrd.Identity) () = { ds_name= name ; ds_description= description diff --git a/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml b/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml index 5d445e0f7dc..34a44e92dfe 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/rrdd_monitor.ml @@ -77,14 +77,6 @@ module OwnerMap = Map.Make (struct String.compare a b end) -let owner_to_string () = function - | Host -> - "host" - | VM uuid -> - "VM " ^ uuid - | SR uuid -> - "SR " ^ uuid - (** Updates all of the hosts rrds. We are passed a list of uuids that is used as the primary source for which VMs are resident on us. When a new uuid turns up that we haven't got an RRD for in our hashtbl, we create a new one. When diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml index 8faf484f2b0..8b56119a76e 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml @@ -142,8 +142,7 @@ let dss_pcpus xc = ~description:("Physical cpu usage for cpu " ^ string_of_int i) ~value:(Rrd.VT_Float (Int64.to_float v /. 1.0e9)) ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true - ~transform:(fun x -> 1.0 -. x) - () + ~transform:Rrd.Inverse () ) :: acc , i + 1 @@ -158,9 +157,7 @@ let dss_pcpus xc = , Ds.ds_make ~name:"cpu_avg" ~units:"(fraction)" ~description:"Average physical cpu usage" ~value:(Rrd.VT_Float (avg_array /. 1.0e9)) - ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true - ~transform:(fun x -> 1.0 -. 
x) - () + ~min:0.0 ~max:1.0 ~ty:Rrd.Derive ~default:true ~transform:Rrd.Inverse () ) in avgcpu_ds :: dss diff --git a/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml b/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml index 1b0f531383e..b6d10953723 100644 --- a/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml +++ b/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml @@ -44,6 +44,15 @@ let ds_owner x = string "sr %s" sr ) +let ds_transform x = + ( "transform" + , match x with + | Rrd.Identity -> + string "identity" + | Rrd.Inverse -> + string "inverse" + ) + let bool b = string "%b" b (* Should use `Bool b *) let float x = string "%.2f" x @@ -63,6 +72,7 @@ let ds_to_json (owner, ds) = [ description ds.Ds.ds_description ; [ds_owner owner] + ; [ds_transform ds.Ds.ds_pdp_transform_function] ; ds_value ds.Ds.ds_value ; [ds_type ds.Ds.ds_type] ; [ diff --git a/ocaml/xcp-rrdd/lib/transport/base/rrd_protocol_v2.ml b/ocaml/xcp-rrdd/lib/transport/base/rrd_protocol_v2.ml index 1dc6f2d25dc..1c6774d525a 100644 --- a/ocaml/xcp-rrdd/lib/transport/base/rrd_protocol_v2.ml +++ b/ocaml/xcp-rrdd/lib/transport/base/rrd_protocol_v2.ml @@ -193,8 +193,13 @@ let uninitialised_ds_of_rpc ((name, rpc) : string * Rpc.t) : let default = bool_of_string (Rrd_rpc.assoc_opt ~key:"default" ~default:"false" kvs) in + let transform = + Rrd_rpc.transform_of_string + (Rrd_rpc.assoc_opt ~key:"transform" ~default:"identity" kvs) + in let ds = - Ds.ds_make ~name ~description ~units ~ty ~value ~min ~max ~default () + Ds.ds_make ~name ~description ~units ~ty ~value ~min ~max ~default + ~transform () in (owner, ds) diff --git a/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.ml b/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.ml index b8b1db7de2c..36ba1b42e59 100644 --- a/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.ml +++ b/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.ml @@ -53,3 +53,13 @@ let owner_of_string (s : string) : Rrd.ds_owner = Rrd.SR uuid | _ -> raise Rrd_protocol.Invalid_payload + +(* Converts a string to value of ds_transform_function type. *) +let transform_of_string (s : string) : Rrd.ds_transform_function = + match s with + | "inverse" -> + Rrd.Inverse + | "identity" -> + Rrd.Identity + | _ -> + raise Rrd_protocol.Invalid_payload diff --git a/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.mli b/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.mli index 8863b65bf4f..0d7d7493ead 100644 --- a/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.mli +++ b/ocaml/xcp-rrdd/lib/transport/base/rrd_rpc.mli @@ -21,3 +21,5 @@ val assoc_opt : key:string -> default:string -> (string * Rpc.t) list -> string val ds_ty_of_string : string -> Rrd.ds_type val owner_of_string : string -> Rrd.ds_owner + +val transform_of_string : string -> Rrd.ds_transform_function diff --git a/ocaml/xenopsd/xc/mem_stats.ml b/ocaml/xenopsd/xc/mem_stats.ml index 9e01d14473e..8daca47aff6 100644 --- a/ocaml/xenopsd/xc/mem_stats.ml +++ b/ocaml/xenopsd/xc/mem_stats.ml @@ -291,7 +291,8 @@ let observe_stats l = | Rrd.VT_Unknown -> nan in - ds.Ds.ds_pdp_transform_function f |> Printf.sprintf "%.0f" + Rrd.apply_transform_function ds.Ds.ds_pdp_transform_function f + |> Printf.sprintf "%.0f" ) in D.debug "stats header: %s" (String.concat "," names) ; From fe79b0f40dd1efb913a2153524d5c204a70b2e96 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Tue, 8 Oct 2024 11:25:06 +0100 Subject: [PATCH 101/141] CA-400124 - rrdd: only serialize transform when it's not default This saves on space, since only cpuX and cpu_avg datasources currently have transform=inverse, all the others have the default identity function specified. 
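
Readers of the v2 protocol already fall back to "identity" when the key is absent, so omitting it does not change behaviour. A small illustrative round-trip check (not part of the change; it only uses the helpers added in the previous commit, and assumes `assoc_opt` returns its default for a missing key):

```
(* Illustrative only: an absent "transform" key deserialises to Identity. *)
let () =
  let kvs = [] in
  let t =
    Rrd_rpc.transform_of_string
      (Rrd_rpc.assoc_opt ~key:"transform" ~default:"identity" kvs)
  in
  assert (t = Rrd.Identity)
```
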
Signed-off-by: Andrii Sultanov --- ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml b/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml index b6d10953723..15f95e3de46 100644 --- a/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml +++ b/ocaml/xcp-rrdd/lib/transport/base/rrd_json.ml @@ -45,13 +45,13 @@ let ds_owner x = ) let ds_transform x = - ( "transform" - , match x with - | Rrd.Identity -> - string "identity" - | Rrd.Inverse -> - string "inverse" - ) + match x with + | Rrd.Identity -> + [] + (* This is the default when transform is absent, and not including it + makes the file smaller *) + | Rrd.Inverse -> + [("transform", string "inverse")] let bool b = string "%b" b (* Should use `Bool b *) @@ -72,7 +72,7 @@ let ds_to_json (owner, ds) = [ description ds.Ds.ds_description ; [ds_owner owner] - ; [ds_transform ds.Ds.ds_pdp_transform_function] + ; ds_transform ds.Ds.ds_pdp_transform_function ; ds_value ds.Ds.ds_value ; [ds_type ds.Ds.ds_type] ; [ From 1e554159b96d8ca18549ccf117e1d9c20a764109 Mon Sep 17 00:00:00 2001 From: Lin Liu Date: Fri, 27 Sep 2024 06:55:34 +0000 Subject: [PATCH 102/141] CA-392674: nbd_client_manager retry connect on nbd device busy to connect to nbd devices, nbd_client_manager will 1. protect the operation with /var/run/nonpersistent/nbd_client_manager file lock 2. check whether nbd is being used by `nbd-client -check` 3. load nbd kernel module by `modprobe nbd` 4. call `nbd-client` to connect to nbd device However, step 3 will trigger systemd-udevd run asyncly, which would open and lock the same nbd devices, run udev rules, etc. This introduce races with step 4, e.g. both process want to open and lock the nbd device. Note: the file lock in step 1 does NOT resovle the issue here, as it only coordinate multiple nbd_client_manager processes. To fix the issue, - we patch nbd-client to report the device busy from kernel to nbd_client_manager - nbd_client_manager should check nbd-client exit code, and retry on device busy - nbd_client_manager call `udevadm settle` to wait for udevd parsing udev rules Note: checking nbd-client exit code is still necessary in case of racing with others Signed-off-by: Lin Liu --- python3/libexec/nbd_client_manager.py | 36 +++++++++++++++++------- python3/tests/test_nbd_client_manager.py | 6 ++-- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/python3/libexec/nbd_client_manager.py b/python3/libexec/nbd_client_manager.py index d0655df9756..3d0920a3845 100644 --- a/python3/libexec/nbd_client_manager.py +++ b/python3/libexec/nbd_client_manager.py @@ -24,6 +24,8 @@ # Don't wait more than 10 minutes for the NBD device MAX_DEVICE_WAIT_MINUTES = 10 +# According to https://github.com/thom311/libnl/blob/main/include/netlink/errno.h#L38 +NLE_BUSY = 25 class InvalidNbdDevName(Exception): """ @@ -80,7 +82,7 @@ def __exit__(self, *args): FILE_LOCK = FileLock(path=LOCK_FILE) -def _call(cmd_args, error=True): +def _call(cmd_args, raise_err=True, log_err=True): """ [call cmd_args] executes [cmd_args] and returns the exit code. If [error] and exit code != 0, log and throws a CalledProcessError. 
@@ -94,14 +96,16 @@ def _call(cmd_args, error=True): _, stderr = proc.communicate() - if error and proc.returncode != 0: - LOGGER.error( - "%s exited with code %d: %s", " ".join(cmd_args), proc.returncode, stderr - ) + if proc.returncode != 0: + if log_err: + LOGGER.error( + "%s exited with code %d: %s", " ".join(cmd_args), proc.returncode, stderr + ) - raise subprocess.CalledProcessError( - returncode=proc.returncode, cmd=cmd_args, output=stderr - ) + if raise_err: + raise subprocess.CalledProcessError( + returncode=proc.returncode, cmd=cmd_args, output=stderr + ) return proc.returncode @@ -116,7 +120,7 @@ def _is_nbd_device_connected(nbd_device): if not os.path.exists(nbd_device): raise NbdDeviceNotFound(nbd_device) cmd = ["nbd-client", "-check", nbd_device] - returncode = _call(cmd, error=False) + returncode = _call(cmd, raise_err=False, log_err=False) if returncode == 0: return True if returncode == 1: @@ -191,6 +195,8 @@ def connect_nbd(path, exportname): """Connects to a free NBD device using nbd-client and returns its path""" # We should not ask for too many nbds, as we might not have enough memory _call(["modprobe", "nbd", "nbds_max=24"]) + # Wait for systemd-udevd to process the udev rules + _call(["udevadm", "settle", "--timeout=30"]) retries = 0 while True: try: @@ -206,7 +212,17 @@ def connect_nbd(path, exportname): "-name", exportname, ] - _call(cmd) + ret = _call(cmd, raise_err=False, log_err=True) + if NLE_BUSY == ret: + # Although _find_unused_nbd_device tell us the nbd devcie is + # not connected by other nbd-client, it may be opened and locked + # by other process like systemd-udev, raise NbdDeviceNotFound to retry + LOGGER.warning("Device %s is busy, will retry", nbd_device) + raise NbdDeviceNotFound(nbd_device) + + if 0 != ret: + raise subprocess.CalledProcessError(returncode=ret, cmd=cmd) + _wait_for_nbd_device(nbd_device=nbd_device, connected=True) _persist_connect_info(nbd_device, path, exportname) nbd = ( diff --git a/python3/tests/test_nbd_client_manager.py b/python3/tests/test_nbd_client_manager.py index 224a1c3e2ea..b3f439f5442 100644 --- a/python3/tests/test_nbd_client_manager.py +++ b/python3/tests/test_nbd_client_manager.py @@ -43,7 +43,8 @@ def test_nbd_device_connected(self, mock_call, mock_exists): result = nbd_client_manager._is_nbd_device_connected('/dev/nbd0') self.assertTrue(result) - mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd0"], error=False) + mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd0"], + raise_err=False, log_err=False) @patch('nbd_client_manager._call') def test_nbd_device_not_connected(self, mock_call, mock_exists): @@ -53,7 +54,8 @@ def test_nbd_device_not_connected(self, mock_call, mock_exists): result = nbd_client_manager._is_nbd_device_connected('/dev/nbd1') self.assertFalse(result) - mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd1"], error=False) + mock_call.assert_called_once_with(["nbd-client", "-check", "/dev/nbd1"], + raise_err=False, log_err=False) def test_nbd_device_not_found(self, mock_exists): mock_exists.return_value = False From 2c651f25b90819155212a902722a389b57083007 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 2 Oct 2024 15:37:17 +0100 Subject: [PATCH 103/141] IH-715 - rrdp-netdev: Remove double (de)serialization networkd generates metrics for two users simultaneously: * xapi db * rrdd Both of these read from the same shared file, but use non-overlapping stats. 
Having moved network metrics collection from xcp-rrdd itself into a plugin, these metrics were serialized twice - moving from networkd to the plugin and from the plugin to the server. Instead generate metrics in the plugin itself and drop this generation from networkd. Signed-off-by: Andrii Sultanov --- ocaml/networkd/bin/network_monitor_thread.ml | 103 +------------- ocaml/xapi-idl/network/network_stats.ml | 16 +-- ocaml/xcp-rrdd/bin/rrdp-netdev/dune | 4 +- ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml | 132 +++++++++++++++++- 4 files changed, 142 insertions(+), 113 deletions(-) diff --git a/ocaml/networkd/bin/network_monitor_thread.ml b/ocaml/networkd/bin/network_monitor_thread.ml index 43b471be21a..9c4b7c3352d 100644 --- a/ocaml/networkd/bin/network_monitor_thread.ml +++ b/ocaml/networkd/bin/network_monitor_thread.ml @@ -109,7 +109,6 @@ let standardise_name name = with _ -> name let get_link_stats () = - let open Network_monitor in let open Netlink in let s = Socket.alloc () in Socket.connect s Socket.NETLINK_ROUTE ; @@ -124,101 +123,20 @@ let get_link_stats () = let is_vlan name = Astring.String.is_prefix ~affix:"eth" name && String.contains name '.' in - List.map (fun link -> (standardise_name (Link.get_name link), link)) links + List.map (fun link -> standardise_name (Link.get_name link)) links |> (* Only keep interfaces with prefixes on the whitelist, and exclude VLAN devices (ethx.y). *) - List.filter (fun (name, _) -> is_whitelisted name && not (is_vlan name)) - in - let devs = - List.map - (fun (name, link) -> - let convert x = Int64.of_int (Unsigned.UInt64.to_int x) in - let eth_stat = - { - default_stats with - rx_bytes= Link.get_stat link Link.RX_BYTES |> convert - ; rx_pkts= Link.get_stat link Link.RX_PACKETS |> convert - ; rx_errors= Link.get_stat link Link.RX_ERRORS |> convert - ; tx_bytes= Link.get_stat link Link.TX_BYTES |> convert - ; tx_pkts= Link.get_stat link Link.TX_PACKETS |> convert - ; tx_errors= Link.get_stat link Link.TX_ERRORS |> convert - } - in - (name, eth_stat) - ) - links + List.filter (fun name -> is_whitelisted name && not (is_vlan name)) in - Cache.free cache ; Socket.close s ; Socket.free s ; devs + Cache.free cache ; Socket.close s ; Socket.free s ; links let rec monitor dbg () = let open Network_interface in let open Network_monitor in ( try - let make_bond_info devs (name, interfaces) = - let devs' = - List.filter (fun (name', _) -> List.mem name' interfaces) devs - in - let eth_stat = - { - default_stats with - rx_bytes= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.rx_bytes) - 0L devs' - ; rx_pkts= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.rx_pkts) - 0L devs' - ; rx_errors= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.rx_errors) - 0L devs' - ; tx_bytes= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.tx_bytes) - 0L devs' - ; tx_pkts= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.tx_pkts) - 0L devs' - ; tx_errors= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.tx_errors) - 0L devs' - } - in - (name, eth_stat) - in - let add_bonds bonds devs = List.map (make_bond_info devs) bonds @ devs in - let transform_taps devs = - let newdevnames = - Xapi_stdext_std.Listext.List.setify (List.map fst devs) - in + let get_stats bonds devs = List.map - (fun name -> - let devs' = List.filter (fun (n, _) -> n = name) devs in - let tot = - List.fold_left - (fun acc (_, b) -> - { - default_stats with - rx_bytes= Int64.add acc.rx_bytes b.rx_bytes - ; rx_pkts= Int64.add acc.rx_pkts b.rx_pkts - ; 
rx_errors= Int64.add acc.rx_errors b.rx_errors - ; tx_bytes= Int64.add acc.tx_bytes b.tx_bytes - ; tx_pkts= Int64.add acc.tx_pkts b.tx_pkts - ; tx_errors= Int64.add acc.tx_errors b.tx_errors - } - ) - default_stats devs' - in - (name, tot) - ) - newdevnames - in - let add_other_stats bonds devs = - List.map - (fun (dev, stat) -> + (fun dev -> if not (Astring.String.is_prefix ~affix:"vif" dev) then ( let open Network_server.Bridge in let bond_slaves = @@ -242,7 +160,6 @@ let rec monitor dbg () = let links_up = if carrier then 1 else 0 in let interfaces = [dev] in { - stat with carrier ; speed ; duplex @@ -286,7 +203,6 @@ let rec monitor dbg () = List.map (fun info -> info.slave) bond_slaves in { - stat with carrier ; speed ; duplex @@ -301,7 +217,7 @@ let rec monitor dbg () = check_for_changes ~dev ~stat ; (dev, stat) ) else - (dev, stat) + (dev, default_stats) ) devs in @@ -309,12 +225,7 @@ let rec monitor dbg () = let bonds : (string * string list) list = Network_server.Bridge.get_all_bonds dbg from_cache in - let devs = - get_link_stats () - |> add_bonds bonds - |> transform_taps - |> add_other_stats bonds - in + let devs = get_link_stats () |> get_stats bonds in ( if List.length bonds <> Hashtbl.length bonds_status then let dead_bonds = Hashtbl.fold diff --git a/ocaml/xapi-idl/network/network_stats.ml b/ocaml/xapi-idl/network/network_stats.ml index 1e10cb8a755..5c6fbaafa26 100644 --- a/ocaml/xapi-idl/network/network_stats.ml +++ b/ocaml/xapi-idl/network/network_stats.ml @@ -35,13 +35,7 @@ let checksum_bytes = 32 let length_bytes = 8 type iface_stats = { - tx_bytes: int64 (** bytes emitted *) - ; tx_pkts: int64 (** packets emitted *) - ; tx_errors: int64 (** error emitted *) - ; rx_bytes: int64 (** bytes received *) - ; rx_pkts: int64 (** packets received *) - ; rx_errors: int64 (** error received *) - ; carrier: bool + carrier: bool ; speed: int ; duplex: duplex ; pci_bus_path: string @@ -55,13 +49,7 @@ type iface_stats = { let default_stats = { - tx_bytes= 0L - ; tx_pkts= 0L - ; tx_errors= 0L - ; rx_bytes= 0L - ; rx_pkts= 0L - ; rx_errors= 0L - ; carrier= false + carrier= false ; speed= 0 ; duplex= Duplex_unknown ; pci_bus_path= "" diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/dune b/ocaml/xcp-rrdd/bin/rrdp-netdev/dune index fe68c431ab4..c5acc80a8be 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-netdev/dune +++ b/ocaml/xcp-rrdd/bin/rrdp-netdev/dune @@ -3,14 +3,16 @@ (name rrdp_netdev) (libraries astring + integers + netlink rrdd-plugin rrdd_plugin_xenctrl rrdd_plugins_libs - xapi-idl xapi-idl.network xapi-idl.rrd xapi-log xapi-rrd + xapi-stdext-std xenctrl ) ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml index 55be1e88a0b..718fd574afd 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml @@ -18,6 +18,128 @@ module D = Debug.Make (struct let name = "xcp-rrdp-netdev" end) module Process = Rrdd_plugin.Process (struct let name = "xcp-rrdd-netdev" end) +type iface_stats = { + tx_bytes: int64 (** bytes emitted *) + ; tx_pkts: int64 (** packets emitted *) + ; tx_errors: int64 (** error emitted *) + ; rx_bytes: int64 (** bytes received *) + ; rx_pkts: int64 (** packets received *) + ; rx_errors: int64 (** error received *) +} + +let default_stats = + { + tx_bytes= 0L + ; tx_pkts= 0L + ; tx_errors= 0L + ; rx_bytes= 0L + ; rx_pkts= 0L + ; rx_errors= 0L + } + +let monitor_whitelist = + ref + [ + "eth" + ; "vif" (* This includes "tap" owing to the use of standardise_name below *) + ] + +let 
standardise_name name = + try + let d1, d2 = Scanf.sscanf name "tap%d.%d" (fun d1 d2 -> (d1, d2)) in + let newname = Printf.sprintf "vif%d.%d" d1 d2 in + newname + with _ -> name + +let get_link_stats () = + let open Netlink in + let s = Socket.alloc () in + Socket.connect s Socket.NETLINK_ROUTE ; + let cache = Link.cache_alloc s in + let links = Link.cache_to_list cache in + let links = + let is_whitelisted name = + List.exists + (fun s -> Astring.String.is_prefix ~affix:s name) + !monitor_whitelist + in + let is_vlan name = + Astring.String.is_prefix ~affix:"eth" name && String.contains name '.' + in + List.map (fun link -> (standardise_name (Link.get_name link), link)) links + |> (* Only keep interfaces with prefixes on the whitelist, and exclude VLAN + devices (ethx.y). *) + List.filter (fun (name, _) -> is_whitelisted name && not (is_vlan name)) + in + let devs = + List.map + (fun (name, link) -> + let convert x = Int64.of_int (Unsigned.UInt64.to_int x) in + let eth_stat = + { + rx_bytes= Link.get_stat link Link.RX_BYTES |> convert + ; rx_pkts= Link.get_stat link Link.RX_PACKETS |> convert + ; rx_errors= Link.get_stat link Link.RX_ERRORS |> convert + ; tx_bytes= Link.get_stat link Link.TX_BYTES |> convert + ; tx_pkts= Link.get_stat link Link.TX_PACKETS |> convert + ; tx_errors= Link.get_stat link Link.TX_ERRORS |> convert + } + in + (name, eth_stat) + ) + links + in + Cache.free cache ; Socket.close s ; Socket.free s ; devs + +let make_bond_info devs (name, interfaces) = + let devs' = List.filter (fun (name', _) -> List.mem name' interfaces) devs in + let eth_stat = + { + rx_bytes= + List.fold_left (fun ac (_, stat) -> Int64.add ac stat.rx_bytes) 0L devs' + ; rx_pkts= + List.fold_left (fun ac (_, stat) -> Int64.add ac stat.rx_pkts) 0L devs' + ; rx_errors= + List.fold_left + (fun ac (_, stat) -> Int64.add ac stat.rx_errors) + 0L devs' + ; tx_bytes= + List.fold_left (fun ac (_, stat) -> Int64.add ac stat.tx_bytes) 0L devs' + ; tx_pkts= + List.fold_left (fun ac (_, stat) -> Int64.add ac stat.tx_pkts) 0L devs' + ; tx_errors= + List.fold_left + (fun ac (_, stat) -> Int64.add ac stat.tx_errors) + 0L devs' + } + in + (name, eth_stat) + +let add_bonds bonds devs = List.map (make_bond_info devs) bonds @ devs + +let transform_taps devs = + let newdevnames = Xapi_stdext_std.Listext.List.setify (List.map fst devs) in + List.map + (fun name -> + let devs' = List.filter (fun (n, _) -> n = name) devs in + let tot = + List.fold_left + (fun acc (_, b) -> + { + rx_bytes= Int64.add acc.rx_bytes b.rx_bytes + ; rx_pkts= Int64.add acc.rx_pkts b.rx_pkts + ; rx_errors= Int64.add acc.rx_errors b.rx_errors + ; tx_bytes= Int64.add acc.tx_bytes b.tx_bytes + ; tx_pkts= Int64.add acc.tx_pkts b.tx_pkts + ; tx_errors= Int64.add acc.tx_errors b.tx_errors + } + ) + default_stats devs' + in + (name, tot) + ) + newdevnames + let generate_netdev_dss doms () = let uuid_of_domid domains domid = let _, uuid, _ = @@ -28,8 +150,14 @@ let generate_netdev_dss doms () = in uuid in - let open Network_stats in - let stats = Network_stats.read_stats () in + + let dbg = "rrdp_netdev" in + let from_cache = true in + let bonds : (string * string list) list = + Network_client.Client.Bridge.get_all_bonds dbg from_cache + in + + let stats = get_link_stats () |> add_bonds bonds |> transform_taps in let dss, sum_rx, sum_tx = List.fold_left (fun (dss, sum_rx, sum_tx) (dev, stat) -> From 008a813be7fba88b9d7b53e52ae08013c71dd956 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 1 Oct 2024 12:53:32 +0100 Subject: [PATCH 104/141] 
http-lib: add backtrace to logs on connection without response Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/http_client.ml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ocaml/libs/http-lib/http_client.ml b/ocaml/libs/http-lib/http_client.ml index 8e8c5cd2d44..5cb67212bcc 100644 --- a/ocaml/libs/http-lib/http_client.ml +++ b/ocaml/libs/http-lib/http_client.ml @@ -177,6 +177,11 @@ let response_of_fd_exn fd = (Astring.String.cuts ~sep:"\n" buf) ) +(* Use a different logging brand, the one used by {D} is ignore in the default + configuration. This allows to have visibility of an issue that interrupts + storage migrations. *) +module L = Debug.Make (struct let name = __MODULE__ end) + (** [response_of_fd fd] returns an optional Http.Response.t record *) let response_of_fd ?(use_fastpath = false) fd = try @@ -188,7 +193,10 @@ let response_of_fd ?(use_fastpath = false) fd = | Unix.Unix_error (_, _, _) as e -> raise e | e -> - D.debug "%s: returning no response because of the exception: %s" + Backtrace.is_important e ; + let bt = Backtrace.get e in + Debug.log_backtrace e bt ; + L.debug "%s: returning no response because of the exception: %s" __FUNCTION__ (Printexc.to_string e) ; None From ed961460921468f16791472b8bf35cb90e886e00 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 4 Oct 2024 14:05:15 +0100 Subject: [PATCH 105/141] http-lib: convert bash script to cram tests Gives more flexibility in tests. Now the results from the client aren't printed, but weren't important to pass the test anyway. Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/client_server_test.sh | 11 ----------- ocaml/libs/http-lib/dune | 6 +----- ocaml/libs/http-lib/test_client_server.t | 7 +++++++ 3 files changed, 8 insertions(+), 16 deletions(-) delete mode 100644 ocaml/libs/http-lib/client_server_test.sh create mode 100644 ocaml/libs/http-lib/test_client_server.t diff --git a/ocaml/libs/http-lib/client_server_test.sh b/ocaml/libs/http-lib/client_server_test.sh deleted file mode 100644 index 601ed257f99..00000000000 --- a/ocaml/libs/http-lib/client_server_test.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -eux - -trap 'kill $(jobs -p)' EXIT - -./test_server.exe & -sleep 1 - -./test_client.exe - diff --git a/ocaml/libs/http-lib/dune b/ocaml/libs/http-lib/dune index ead0f1d19f6..d4e22f7d3c5 100644 --- a/ocaml/libs/http-lib/dune +++ b/ocaml/libs/http-lib/dune @@ -133,14 +133,10 @@ ) ) -(rule - (alias runtest) +(cram (package xapi) (deps test_client.exe test_server.exe - client_server_test.sh ) - (action (run bash client_server_test.sh)) ) - diff --git a/ocaml/libs/http-lib/test_client_server.t b/ocaml/libs/http-lib/test_client_server.t new file mode 100644 index 00000000000..12ac42da6c8 --- /dev/null +++ b/ocaml/libs/http-lib/test_client_server.t @@ -0,0 +1,7 @@ +== Bring server up + $ trap 'kill $(jobs -p)' EXIT + $ ./test_server.exe & + $ sleep 1 + +== Normal + $ ./test_client.exe > /dev/null From 3bef65f8b7948992825c68a0deff098ace7ce8b1 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 4 Oct 2024 18:21:41 +0100 Subject: [PATCH 106/141] http-lib: prepare test client for more commands Current behaviour for displaying stats is done with the --perf parameter Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/test_client.ml | 50 +++++++++++------------- ocaml/libs/http-lib/test_client_server.t | 4 +- 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/ocaml/libs/http-lib/test_client.ml b/ocaml/libs/http-lib/test_client.ml index 
041e08b0db4..4bc7f2a3e16 100644 --- a/ocaml/libs/http-lib/test_client.ml +++ b/ocaml/libs/http-lib/test_client.ml @@ -4,12 +4,15 @@ open Safe_resources let user_agent = "test_client" -(* To do: - 1. test with and without SSL - 2. test with n parallel threads - 3. make sure xapi still works - 4. make xapi able to read stats -*) +let ip = ref "127.0.0.1" + +let port = ref 8080 + +let use_ssl = ref false + +let use_fastpath = ref false + +let use_framing = ref false let with_connection ip port f = let inet_addr = Unix.inet_addr_of_string ip in @@ -108,30 +111,10 @@ let sample n f = done ; !p -let _ = - let ip = ref "127.0.0.1" in - let port = ref 8080 in - let use_ssl = ref false in - let use_fastpath = ref false in - let use_framing = ref false in - Arg.parse - [ - ("-ip", Arg.Set_string ip, "IP to connect to") - ; ("-p", Arg.Set_int port, "port to connect") - ; ("-fast", Arg.Set use_fastpath, "use HTTP fastpath") - ; ("-frame", Arg.Set use_framing, "use HTTP framing") - ; ("--ssl", Arg.Set use_ssl, "use SSL rather than plaintext") - ] - (fun x -> Printf.fprintf stderr "Ignoring unexpected argument: %s\n" x) - "A simple test HTTP client" ; +let perf () = let use_fastpath = !use_fastpath in let use_framing = !use_framing in let transport = if !use_ssl then with_stunnel else with_connection in - (* - Printf.printf "Overhead of timing: "; - let overhead = sample 10 (fun () -> per_nsec 1. (fun () -> ())) in - Printf.printf "%s ops/sec\n" (Normal_population.to_string overhead); -*) Printf.printf "1 thread non-persistent connections: " ; let nonpersistent = sample 1 (fun () -> @@ -183,3 +166,16 @@ let _ = ) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string thread_persistent) + +let () = + Arg.parse + [ + ("-ip", Arg.Set_string ip, "IP to connect to") + ; ("-p", Arg.Set_int port, "port to connect") + ; ("-fast", Arg.Set use_fastpath, "use HTTP fastpath") + ; ("-frame", Arg.Set use_framing, "use HTTP framing") + ; ("--ssl", Arg.Set use_ssl, "use SSL rather than plaintext") + ; ("--perf", Arg.Unit perf, "Collect performance stats") + ] + (fun x -> Printf.fprintf stderr "Ignoring unexpected argument: %s\n" x) + "A simple test HTTP client" diff --git a/ocaml/libs/http-lib/test_client_server.t b/ocaml/libs/http-lib/test_client_server.t index 12ac42da6c8..21dc6762de1 100644 --- a/ocaml/libs/http-lib/test_client_server.t +++ b/ocaml/libs/http-lib/test_client_server.t @@ -1,7 +1,7 @@ == Bring server up $ trap 'kill $(jobs -p)' EXIT $ ./test_server.exe & - $ sleep 1 + $ sleep 0.1 == Normal - $ ./test_client.exe > /dev/null + $ ./test_client.exe --perf > /dev/null From d8bd9f233b86c2ef8dec1279ce515e579986da5c Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Mon, 7 Oct 2024 15:56:53 +0100 Subject: [PATCH 107/141] http-libs: add test about error logging While this does not exercise the exact error that can happen in long migrations, it gets logged in a similar way. There's no easy way to trigger the issue, the best chance is to send a malformed response to trigger a Parse_error. 
I did modify the code in http_client and verified that current code can produce the logging, with backtraces successfully, when set up properly (like in the test client) Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/dune | 2 ++ ocaml/libs/http-lib/test_client.ml | 39 ++++++++++++++++++++++++ ocaml/libs/http-lib/test_client_server.t | 9 ++++++ ocaml/libs/http-lib/test_server.ml | 4 +++ 4 files changed, 54 insertions(+) diff --git a/ocaml/libs/http-lib/dune b/ocaml/libs/http-lib/dune index d4e22f7d3c5..cc5bec51648 100644 --- a/ocaml/libs/http-lib/dune +++ b/ocaml/libs/http-lib/dune @@ -113,6 +113,8 @@ safe-resources stunnel threads.posix + xapi-backtrace + xapi-log xapi-stdext-pervasives xapi-stdext-unix ) diff --git a/ocaml/libs/http-lib/test_client.ml b/ocaml/libs/http-lib/test_client.ml index 4bc7f2a3e16..43b4eb84e5e 100644 --- a/ocaml/libs/http-lib/test_client.ml +++ b/ocaml/libs/http-lib/test_client.ml @@ -167,6 +167,44 @@ let perf () = in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string thread_persistent) +let send_close_conn ~use_fastpath ~use_framing keep_alive s = + try + Http_client.rpc ~use_fastpath s + (Http.Request.make ~frame:use_framing ~version:"1.1" ~keep_alive + ~user_agent ~body:"hello" Http.Get "/close_conn" + ) (fun response s -> + match response.Http.Response.content_length with + | Some l -> + let _ = Unixext.really_read_string s (Int64.to_int l) in + Printf.printf "Received a response with %Ld bytes.\n" l ; + exit 1 + | None -> + Printf.printf "Need a content length\n" ; + exit 1 + ) + with Unix.Unix_error (Unix.ECONNRESET, "read", "") as e -> + Backtrace.is_important e ; + let bt = Backtrace.get e in + Debug.log_backtrace e bt + +let ( let@ ) f x = f x + +let logerr () = + (* Send a request to the server to close connection instead of replying with + an http request, force the error to be logged *) + Printexc.record_backtrace true ; + Debug.log_to_stdout () ; + Debug.set_level Syslog.Debug ; + let use_fastpath = !use_fastpath in + let use_framing = !use_framing in + let transport = if !use_ssl then with_stunnel else with_connection in + let call () = + let@ () = Backtrace.with_backtraces in + let@ s = transport !ip !port in + send_close_conn ~use_fastpath ~use_framing false s + in + match call () with `Ok () -> () | `Error (_, _) -> () + let () = Arg.parse [ @@ -176,6 +214,7 @@ let () = ; ("-frame", Arg.Set use_framing, "use HTTP framing") ; ("--ssl", Arg.Set use_ssl, "use SSL rather than plaintext") ; ("--perf", Arg.Unit perf, "Collect performance stats") + ; ("--logerr", Arg.Unit logerr, "Test log on error") ] (fun x -> Printf.fprintf stderr "Ignoring unexpected argument: %s\n" x) "A simple test HTTP client" diff --git a/ocaml/libs/http-lib/test_client_server.t b/ocaml/libs/http-lib/test_client_server.t index 21dc6762de1..2d862d29c81 100644 --- a/ocaml/libs/http-lib/test_client_server.t +++ b/ocaml/libs/http-lib/test_client_server.t @@ -5,3 +5,12 @@ == Normal $ ./test_client.exe --perf > /dev/null + +== Expect to log after a closed connection + $ ./test_client.exe --logerr > result + $ grep "ECONNRESET" result -c + 1 + $ grep "backtrace" result -c + 11 + $ grep "Called from" result -c + 8 diff --git a/ocaml/libs/http-lib/test_server.ml b/ocaml/libs/http-lib/test_server.ml index a1f703042ee..2cae4f4ba5f 100644 --- a/ocaml/libs/http-lib/test_server.ml +++ b/ocaml/libs/http-lib/test_server.ml @@ -80,6 +80,10 @@ let _ = ) ) ) ; + (* Forces a protocol error by closing the connection without sending a + proper http reponse code *) + 
Server.add_handler server Http.Get "/close_conn" + (FdIO (fun _ _ _ -> raise End_of_file)) ; let ip = "0.0.0.0" in let inet_addr = Unix.inet_addr_of_string ip in let addr = Unix.ADDR_INET (inet_addr, !port) in From cda61943ef4031dd84cc0bed381f24568886f1c7 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 9 Oct 2024 14:19:33 +0100 Subject: [PATCH 108/141] http-lib: use let@ for perf testing of the client No functional difference Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/test_client.ml | 52 ++++++++++++------------------ 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/ocaml/libs/http-lib/test_client.ml b/ocaml/libs/http-lib/test_client.ml index 43b4eb84e5e..b8ca7a1ad18 100644 --- a/ocaml/libs/http-lib/test_client.ml +++ b/ocaml/libs/http-lib/test_client.ml @@ -111,59 +111,51 @@ let sample n f = done ; !p +let ( let@ ) f x = f x + let perf () = let use_fastpath = !use_fastpath in let use_framing = !use_framing in let transport = if !use_ssl then with_stunnel else with_connection in Printf.printf "1 thread non-persistent connections: " ; let nonpersistent = - sample 1 (fun () -> - per_nsec 1. (fun () -> - transport !ip !port (one ~use_fastpath ~use_framing false) - ) - ) + let@ () = sample 1 in + let@ () = per_nsec 1. in + transport !ip !port (one ~use_fastpath ~use_framing false) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string nonpersistent) ; Printf.printf "1 thread non-persistent connections (query): " ; let nonpersistent_query = - sample 1 (fun () -> - per_nsec 1. (fun () -> - transport !ip !port (query ~use_fastpath ~use_framing false) - ) - ) + let@ () = sample 1 in + let@ () = per_nsec 1. in + transport !ip !port (query ~use_fastpath ~use_framing false) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string nonpersistent_query) ; Printf.printf "10 threads non-persistent connections: " ; let thread_nonpersistent = - sample 1 (fun () -> - threads 10 (fun () -> - per_nsec 5. (fun () -> - transport !ip !port (one ~use_fastpath ~use_framing false) - ) - ) - ) + let@ () = sample 1 in + let@ () = threads 10 in + let@ () = per_nsec 5. in + transport !ip !port (one ~use_fastpath ~use_framing false) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string thread_nonpersistent) ; Printf.printf "1 thread persistent connection: " ; let persistent = - sample 1 (fun () -> - transport !ip !port (fun s -> - per_nsec 1. (fun () -> one ~use_fastpath ~use_framing true s) - ) - ) + let@ () = sample 1 in + let@ s = transport !ip !port in + let@ () = per_nsec 1. in + one ~use_fastpath ~use_framing true s in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string persistent) ; Printf.printf "10 threads persistent connections: " ; let thread_persistent = - sample 1 (fun () -> - threads 10 (fun () -> - transport !ip !port (fun s -> - per_nsec 5. (fun () -> one ~use_fastpath ~use_framing true s) - ) - ) - ) + let@ () = sample 1 in + let@ () = threads 10 in + let@ s = transport !ip !port in + let@ () = per_nsec 5. in + one ~use_fastpath ~use_framing true s in Printf.printf "%s RPCs/sec\n%!" 
(Normal_population.to_string thread_persistent) @@ -187,8 +179,6 @@ let send_close_conn ~use_fastpath ~use_framing keep_alive s = let bt = Backtrace.get e in Debug.log_backtrace e bt -let ( let@ ) f x = f x - let logerr () = (* Send a request to the server to close connection instead of replying with an http request, force the error to be logged *) From 4f8644106598af03993c44a6289a43dca72f4975 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Wed, 9 Oct 2024 14:29:26 +0100 Subject: [PATCH 109/141] http-lib: make perf shorter Taking measurements in practice doesn't lead to improved accuracy. Also change the tests so more than one sample is collected and can know how noisy the measurements really are. Here's an example of a run, including the result before the change: ``` $ ./test_client.exe --perf - 1 thread non-persistent connections: 4896.0 +/- 0.0 RPCs/sec - 1 thread non-persistent connections (query): 4811.0 +/- 0.0 RPCs/sec - 10 threads non-persistent connections: 7175.0 +/- 0.0 RPCs/sec - 1 thread persistent connection: 16047.0 +/- 0.0 RPCs/sec - 10 threads persistent connections: 7713.0 +/- 0.0 RPCs/sec + 1 thread non-persistent connections: 5042.0 +/- 247.5 RPCs/sec + 1 thread non-persistent connections (query): 5173.0 +/- 216.0 RPCs/sec + 10 threads non-persistent connections: 7678.0 +/- 2241.2 RPCs/sec + 1 thread persistent connection: 21814.0 +/- 2124.6 RPCs/sec + 10 threads persistent connections: 10154.0 +/- 2461.9 RPCs/sec ``` Signed-off-by: Pau Ruiz Safont --- ocaml/libs/http-lib/test_client.ml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/ocaml/libs/http-lib/test_client.ml b/ocaml/libs/http-lib/test_client.ml index b8ca7a1ad18..eada0811a42 100644 --- a/ocaml/libs/http-lib/test_client.ml +++ b/ocaml/libs/http-lib/test_client.ml @@ -117,44 +117,44 @@ let perf () = let use_fastpath = !use_fastpath in let use_framing = !use_framing in let transport = if !use_ssl then with_stunnel else with_connection in - Printf.printf "1 thread non-persistent connections: " ; + Printf.printf "1 thread non-persistent connections: " ; let nonpersistent = - let@ () = sample 1 in - let@ () = per_nsec 1. in + let@ () = sample 10 in + let@ () = per_nsec 0.1 in transport !ip !port (one ~use_fastpath ~use_framing false) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string nonpersistent) ; - Printf.printf "1 thread non-persistent connections (query): " ; + Printf.printf "1 thread non-persistent connections (query): " ; let nonpersistent_query = - let@ () = sample 1 in - let@ () = per_nsec 1. in + let@ () = sample 10 in + let@ () = per_nsec 0.1 in transport !ip !port (query ~use_fastpath ~use_framing false) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string nonpersistent_query) ; - Printf.printf "10 threads non-persistent connections: " ; + Printf.printf "10 threads non-persistent connections: " ; let thread_nonpersistent = - let@ () = sample 1 in + let@ () = sample 10 in let@ () = threads 10 in - let@ () = per_nsec 5. in + let@ () = per_nsec 0.1 in transport !ip !port (one ~use_fastpath ~use_framing false) in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string thread_nonpersistent) ; - Printf.printf "1 thread persistent connection: " ; + Printf.printf "1 thread persistent connection: " ; let persistent = - let@ () = sample 1 in + let@ () = sample 10 in let@ s = transport !ip !port in - let@ () = per_nsec 1. in + let@ () = per_nsec 0.1 in one ~use_fastpath ~use_framing true s in Printf.printf "%s RPCs/sec\n%!" 
(Normal_population.to_string persistent) ; - Printf.printf "10 threads persistent connections: " ; + Printf.printf "10 threads persistent connections: " ; let thread_persistent = - let@ () = sample 1 in + let@ () = sample 10 in let@ () = threads 10 in let@ s = transport !ip !port in - let@ () = per_nsec 5. in + let@ () = per_nsec 0.1 in one ~use_fastpath ~use_framing true s in Printf.printf "%s RPCs/sec\n%!" (Normal_population.to_string thread_persistent) From aa09cb80d28d213d565b7187b2d5451c614b9437 Mon Sep 17 00:00:00 2001 From: Elijah Sadorra Date: Wed, 9 Oct 2024 09:25:35 +0000 Subject: [PATCH 110/141] CA-399256: Ensure AD domain name check is case insensitive Signed-off-by: Elijah Sadorra --- ocaml/xapi/xapi_pool.ml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 13b1d698714..e3aca23e47b 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -322,7 +322,8 @@ let pre_join_checks ~__context ~rpc ~session_id ~force = slavetobe_auth_type slavetobe_auth_service_name ; if slavetobe_auth_type <> master_auth_type - || slavetobe_auth_service_name <> master_auth_service_name + || String.lowercase_ascii slavetobe_auth_service_name + <> String.lowercase_ascii master_auth_service_name then ( error "Cannot join pool whose external authentication configuration is \ From 52d21c08394d74958e48e73feddab273e86339d5 Mon Sep 17 00:00:00 2001 From: Christian Lindig Date: Wed, 9 Oct 2024 14:48:39 +0100 Subject: [PATCH 111/141] XSI-1722 fix timer for host heartbeat In XSI-1722 we notice that xapi does not detect a host being offline based on the heartbeat. The reason is a bug: we miss to store a new timer in the corresponding hash table. Signed-off-by: Christian Lindig --- ocaml/xapi/db_gc.ml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi/db_gc.ml b/ocaml/xapi/db_gc.ml index 2efe11b89ee..c8c68309369 100644 --- a/ocaml/xapi/db_gc.ml +++ b/ocaml/xapi/db_gc.ml @@ -91,8 +91,12 @@ let check_host_liveness ~__context = | Some x -> x | None -> - Clock.Timer.start - ~duration:!Xapi_globs.host_assumed_dead_interval + let t = + Clock.Timer.start + ~duration:!Xapi_globs.host_assumed_dead_interval + in + Hashtbl.replace host_heartbeat_table host t ; + t ) in if not (Clock.Timer.has_expired timer) then From 271523424309da48667670206a426390fcb53a94 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Thu, 10 Oct 2024 10:04:40 +0100 Subject: [PATCH 112/141] fixup! 
IH-715 - rrdp-netdev: Remove double (de)serialization Signed-off-by: Andrii Sultanov --- ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml index 718fd574afd..5b138aebbe0 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml @@ -44,12 +44,10 @@ let monitor_whitelist = ; "vif" (* This includes "tap" owing to the use of standardise_name below *) ] +(** Transform names of the form 'tapX.X' to 'vifX.X' so these can be handled + consistently later *) let standardise_name name = - try - let d1, d2 = Scanf.sscanf name "tap%d.%d" (fun d1 d2 -> (d1, d2)) in - let newname = Printf.sprintf "vif%d.%d" d1 d2 in - newname - with _ -> name + try Scanf.sscanf name "tap%d.%d" @@ Printf.sprintf "vif%d.%d" with _ -> name let get_link_stats () = let open Netlink in @@ -93,24 +91,17 @@ let get_link_stats () = let make_bond_info devs (name, interfaces) = let devs' = List.filter (fun (name', _) -> List.mem name' interfaces) devs in + let sum_list f = + List.fold_left (fun ac (_, stat) -> Int64.add ac (f stat)) 0L devs' + in let eth_stat = { - rx_bytes= - List.fold_left (fun ac (_, stat) -> Int64.add ac stat.rx_bytes) 0L devs' - ; rx_pkts= - List.fold_left (fun ac (_, stat) -> Int64.add ac stat.rx_pkts) 0L devs' - ; rx_errors= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.rx_errors) - 0L devs' - ; tx_bytes= - List.fold_left (fun ac (_, stat) -> Int64.add ac stat.tx_bytes) 0L devs' - ; tx_pkts= - List.fold_left (fun ac (_, stat) -> Int64.add ac stat.tx_pkts) 0L devs' - ; tx_errors= - List.fold_left - (fun ac (_, stat) -> Int64.add ac stat.tx_errors) - 0L devs' + rx_bytes= sum_list (fun stat -> stat.rx_bytes) + ; rx_pkts= sum_list (fun stat -> stat.rx_pkts) + ; rx_errors= sum_list (fun stat -> stat.rx_errors) + ; tx_bytes= sum_list (fun stat -> stat.tx_bytes) + ; tx_pkts= sum_list (fun stat -> stat.tx_pkts) + ; tx_errors= sum_list (fun stat -> stat.tx_errors) } in (name, eth_stat) From f06d9209c99c534154a7a4d64db6968318eb685b Mon Sep 17 00:00:00 2001 From: Colin James Date: Fri, 11 Oct 2024 11:13:07 +0100 Subject: [PATCH 113/141] Replace fold with of_list in rbac Folding over a list to add its elements to a set (which is initially empty) is operationally equivalent to calling of_list (of the set), but potentially less efficient. The implementation of of_list only uses "add" for small lists, e.g. the cases for lists [x_1; x_2; ...; x_N] for all N in range 2 <= N <= 5 are matched literally and expanded to: add x_N (... (add x_1 (singleton x_0))) However, larger lists are first sorted and the underlying tree representing the set is constructed directly. 
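A rough sketch of the two equivalent constructions (illustrative only, not part of the patch; the helper names `via_fold` and `via_of_list` are invented):

```
(* Illustrative only: both build the same set; of_list avoids one
   insertion (and rebalance) per element for larger inputs, sorting
   once and building the balanced tree directly. *)
module Permission_set = Set.Make (String)

let via_fold permissions =
  List.fold_left
    (fun set p -> Permission_set.add p set)
    Permission_set.empty permissions

let via_of_list permissions = Permission_set.of_list permissions
```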
Signed-off-by: Colin James --- ocaml/xapi/rbac.ml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ocaml/xapi/rbac.ml b/ocaml/xapi/rbac.ml index 5b442f11a4a..feefcf4143f 100644 --- a/ocaml/xapi/rbac.ml +++ b/ocaml/xapi/rbac.ml @@ -69,10 +69,7 @@ let session_permissions_tbl = Hashtbl.create 256 (* initial 256 sessions *) module Permission_set = Set.Make (String) -let permission_set permission_list = - List.fold_left - (fun set r -> Permission_set.add r set) - Permission_set.empty permission_list +let permission_set = Permission_set.of_list let create_session_permissions_tbl ~session_id ~rbac_permissions = if From 6631d384ec77998cb95eb2d10e5668d8d82db901 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 11 Oct 2024 14:47:25 +0100 Subject: [PATCH 114/141] maintenance: write interface files for vhd-tool This detects some unused bindings and a mutable field. Signed-off-by: Pau Ruiz Safont --- ocaml/vhd-tool/cli/dune | 3 +- ocaml/vhd-tool/cli/sparse_dd.ml | 2 +- ocaml/vhd-tool/src/channels.mli | 35 +++++++++++++++ ocaml/vhd-tool/src/dune | 7 +-- ocaml/vhd-tool/src/impl.ml | 15 +------ ocaml/vhd-tool/src/impl.mli | 75 ++++++++++++++++++++++++++++++++ ocaml/vhd-tool/src/input.mli | 19 ++++++++ ocaml/vhd-tool/src/nbd_input.mli | 24 ++++++++++ ocaml/vhd-tool/src/xenstore.ml | 15 ------- quality-gate.sh | 2 +- 10 files changed, 159 insertions(+), 38 deletions(-) create mode 100644 ocaml/vhd-tool/src/channels.mli create mode 100644 ocaml/vhd-tool/src/impl.mli create mode 100644 ocaml/vhd-tool/src/input.mli create mode 100644 ocaml/vhd-tool/src/nbd_input.mli delete mode 100644 ocaml/vhd-tool/src/xenstore.ml diff --git a/ocaml/vhd-tool/cli/dune b/ocaml/vhd-tool/cli/dune index 99f73fa7615..aca350c9f45 100644 --- a/ocaml/vhd-tool/cli/dune +++ b/ocaml/vhd-tool/cli/dune @@ -3,7 +3,7 @@ (names main sparse_dd get_vhd_vsize) (libraries astring - + local_lib cmdliner cstruct @@ -19,6 +19,7 @@ xapi-idl xapi-log xenstore_transport.unix + ezxenstore ) ) diff --git a/ocaml/vhd-tool/cli/sparse_dd.ml b/ocaml/vhd-tool/cli/sparse_dd.ml index 7502a541e37..fe7a036de81 100644 --- a/ocaml/vhd-tool/cli/sparse_dd.ml +++ b/ocaml/vhd-tool/cli/sparse_dd.ml @@ -198,7 +198,7 @@ let after f g = the driver domain corresponding to the frontend device [path] in this domain. *) let find_backend_device path = try - let open Xenstore in + let open Ezxenstore_core.Xenstore in (* If we're looking at a xen frontend device, see if the backend is in the same domain. If so check if it looks like a .vhd *) let rdev = (Unix.LargeFile.stat path).Unix.LargeFile.st_rdev in diff --git a/ocaml/vhd-tool/src/channels.mli b/ocaml/vhd-tool/src/channels.mli new file mode 100644 index 00000000000..5fe6db7ab04 --- /dev/null +++ b/ocaml/vhd-tool/src/channels.mli @@ -0,0 +1,35 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+*) + +type t = { + really_read: Cstruct.t -> unit Lwt.t + ; really_write: Cstruct.t -> unit Lwt.t + ; offset: int64 ref + ; skip: int64 -> unit Lwt.t + ; copy_from: Lwt_unix.file_descr -> int64 -> int64 Lwt.t + ; close: unit -> unit Lwt.t +} + +exception Impossible_to_seek + +val of_raw_fd : Lwt_unix.file_descr -> t Lwt.t + +val of_seekable_fd : Lwt_unix.file_descr -> t Lwt.t + +type verification_config = { + sni: string option + ; verify: Ssl.verify_mode + ; cert_bundle_path: string +} + +val of_ssl_fd : + Lwt_unix.file_descr -> string option -> verification_config option -> t Lwt.t diff --git a/ocaml/vhd-tool/src/dune b/ocaml/vhd-tool/src/dune index 8d278eefa07..f7ab6341f77 100644 --- a/ocaml/vhd-tool/src/dune +++ b/ocaml/vhd-tool/src/dune @@ -12,7 +12,6 @@ cohttp cohttp-lwt cstruct - (re_export ezxenstore) io-page lwt lwt.unix @@ -27,17 +26,13 @@ ssl tar uri + uuidm vhd-format vhd-format-lwt tapctl xapi-stdext-std xapi-stdext-unix xen-api-client-lwt - xenstore - xenstore.client - xenstore.unix - xenstore_transport - xenstore_transport.unix ) (preprocess (per_module diff --git a/ocaml/vhd-tool/src/impl.ml b/ocaml/vhd-tool/src/impl.ml index 6052e77eb52..d46fe0ac19d 100644 --- a/ocaml/vhd-tool/src/impl.ml +++ b/ocaml/vhd-tool/src/impl.ml @@ -42,12 +42,6 @@ end) open F -(* -open Vhd -open Vhd_format_lwt -*) -let vhd_search_path = "/dev/mapper" - let require name arg = match arg with | None -> @@ -398,16 +392,9 @@ module TarStream = struct ; nr_bytes_remaining: int ; (* start at 0 *) next_counter: int - ; mutable header: Tar.Header.t option + ; header: Tar.Header.t option } - let to_string t = - Printf.sprintf - "work_done = %Ld; nr_bytes_remaining = %d; next_counter = %d; filename = \ - %s" - t.work_done t.nr_bytes_remaining t.next_counter - (match t.header with None -> "None" | Some h -> h.Tar.Header.file_name) - let initial total_size = { work_done= 0L diff --git a/ocaml/vhd-tool/src/impl.mli b/ocaml/vhd-tool/src/impl.mli new file mode 100644 index 00000000000..2ffa08da6ce --- /dev/null +++ b/ocaml/vhd-tool/src/impl.mli @@ -0,0 +1,75 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+*) + +module F : module type of Vhd_format.F.From_file (Vhd_format_lwt.IO) + +val get : + 'a + -> string option + -> string option + -> [> `Error of bool * string | `Ok of unit] + +val info : 'a -> string option -> [> `Error of bool * string | `Ok of unit] + +val contents : 'a -> string option -> [> `Error of bool * string | `Ok of unit] + +val create : + Common.t + -> string option + -> string option + -> string option + -> [> `Error of bool * string | `Ok of unit] + +val check : + Common.t -> string option -> [> `Error of bool * string | `Ok of unit] + +val stream : + Common.t -> StreamCommon.t -> [> `Error of bool * string | `Ok of unit] + +val serve : + Common.t + -> string + -> int option + -> string + -> string option + -> string + -> int option + -> string + -> int64 option + -> bool + -> bool + -> bool + -> string option + -> bool + -> [> `Error of bool * string | `Ok of unit] + +(** Functions used by sparse_dd *) + +val make_stream : + Common.t + -> string + -> string option + -> string + -> string + -> Vhd_format_lwt.IO.fd Nbd_input.F.stream Lwt.t + +val write_stream : + Common.t + -> Vhd_format_lwt.IO.fd F.stream + -> string + -> StreamCommon.protocol option + -> bool + -> (int64 -> int64 -> unit) + -> string option + -> string option + -> Channels.verification_config option + -> unit Lwt.t diff --git a/ocaml/vhd-tool/src/input.mli b/ocaml/vhd-tool/src/input.mli new file mode 100644 index 00000000000..eb7e43198be --- /dev/null +++ b/ocaml/vhd-tool/src/input.mli @@ -0,0 +1,19 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. +*) + +type 'a t = 'a Lwt.t + +type fd = {fd: Lwt_unix.file_descr; mutable offset: int64} + +include Vhd_format.S.INPUT with type 'a t := 'a t with type fd := fd + +val of_fd : Lwt_unix.file_descr -> fd diff --git a/ocaml/vhd-tool/src/nbd_input.mli b/ocaml/vhd-tool/src/nbd_input.mli new file mode 100644 index 00000000000..6c5b1c275ac --- /dev/null +++ b/ocaml/vhd-tool/src/nbd_input.mli @@ -0,0 +1,24 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+*) + +module F : module type of Vhd_format.F.From_file (Vhd_format_lwt.IO) + +val raw : + ?extent_reader:string -> 'a -> string -> string -> int64 -> 'a F.stream Lwt.t + +val vhd : + ?extent_reader:string + -> Vhd_format_lwt.IO.fd Vhd_format.F.Raw.t + -> string + -> string + -> int64 + -> Vhd_format_lwt.IO.fd F.stream Lwt.t diff --git a/ocaml/vhd-tool/src/xenstore.ml b/ocaml/vhd-tool/src/xenstore.ml deleted file mode 100644 index b0c0dfd9e8d..00000000000 --- a/ocaml/vhd-tool/src/xenstore.ml +++ /dev/null @@ -1,15 +0,0 @@ -(* - * Copyright (C) Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -include Ezxenstore_core.Xenstore diff --git a/quality-gate.sh b/quality-gate.sh index 2629645e03a..74d76499986 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=505 + N=500 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) From c4a9e25c757ab4f679b8c9e852dad02aeeb0cdbb Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 11 Oct 2024 14:48:45 +0100 Subject: [PATCH 115/141] maintenance: add interface to vhd-tool's Chunked Also change the interface and explain the meaning behind the values. Signed-off-by: Pau Ruiz Safont --- ocaml/vhd-tool/cli/sparse_dd.ml | 4 ++-- ocaml/vhd-tool/src/chunked.ml | 9 +++++--- ocaml/vhd-tool/src/chunked.mli | 40 +++++++++++++++++++++++++++++++++ ocaml/vhd-tool/src/impl.ml | 4 ++-- quality-gate.sh | 2 +- 5 files changed, 51 insertions(+), 8 deletions(-) create mode 100644 ocaml/vhd-tool/src/chunked.mli diff --git a/ocaml/vhd-tool/cli/sparse_dd.ml b/ocaml/vhd-tool/cli/sparse_dd.ml index fe7a036de81..19dc6422a27 100644 --- a/ocaml/vhd-tool/cli/sparse_dd.ml +++ b/ocaml/vhd-tool/cli/sparse_dd.ml @@ -175,7 +175,7 @@ module Progress = struct let s = Printf.sprintf "Progress: %.0f" (fraction *. 100.) in let data = Cstruct.create (String.length s) in Cstruct.blit_from_string s 0 data 0 (String.length s) ; - Chunked.marshal header {Chunked.offset= 0L; data} ; + Chunked.(marshal header (make ~sector:0L data)) ; Printf.printf "%s%s%!" (Cstruct.to_string header) s ) @@ -183,7 +183,7 @@ module Progress = struct let close () = if !machine_readable_progress then ( let header = Cstruct.create Chunked.sizeof in - Chunked.marshal header {Chunked.offset= 0L; data= Cstruct.create 0} ; + Chunked.(marshal header end_of_stream) ; Printf.printf "%s%!" 
(Cstruct.to_string header) ) end diff --git a/ocaml/vhd-tool/src/chunked.ml b/ocaml/vhd-tool/src/chunked.ml index 92d7ebbfcaf..269dad8df5a 100644 --- a/ocaml/vhd-tool/src/chunked.ml +++ b/ocaml/vhd-tool/src/chunked.ml @@ -21,12 +21,15 @@ type t = { ; data: Cstruct.t (** data to write *) } -let marshal (buf : Cstruct.t) t = +let end_of_stream = {offset= 0L; data= Cstruct.create 0} + +let make ~sector ?(size = 512L) data = {offset= Int64.mul sector size; data} + +let marshal buf t = set_t_offset buf t.offset ; set_t_len buf (Int32.of_int (Cstruct.length t.data)) -let is_last_chunk (buf : Cstruct.t) = - get_t_offset buf = 0L && get_t_len buf = 0l +let is_last_chunk buf = get_t_offset buf = 0L && get_t_len buf = 0l let get_offset = get_t_offset diff --git a/ocaml/vhd-tool/src/chunked.mli b/ocaml/vhd-tool/src/chunked.mli new file mode 100644 index 00000000000..891e7266a35 --- /dev/null +++ b/ocaml/vhd-tool/src/chunked.mli @@ -0,0 +1,40 @@ +(* Copyright (C) Cloud Software Group Inc. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; version 2.1 only. with the special + exception on linking described in file LICENSE. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. +*) + +val sizeof : int + +(** [t] is the metadata of a chunk of disk that's meant to be streamed. These + are used in a protocol that interleaves the metadata and the data until an + empty metadata block is sent, which signals the end of the stream. *) +type t + +val end_of_stream : t +(** [end_of_stream] is the value that signals the end of the stream of chunks + being transferred. *) + +val make : sector:int64 -> ?size:int64 -> Cstruct.t -> t +(** [make ~sector ?size data] creates a chunk of disk that needs to be + transferred, starting at the sector [sector]. [size] is the sector size, in + bytes. The default is 512. *) + +val marshal : Cstruct.t -> t -> unit +(** [marshall buffer chunk] writes the metadata of [chunk] to [buffer]. When + transferring a whole disk, this is called a header and is written before + the data. 
*) val is_last_chunk : Cstruct.t -> bool +(** [is_last_chunk buffer] returns whether the current [buffer] is + {end_of_stream} *) + +val get_offset : Cstruct.t -> int64 + +val get_len : Cstruct.t -> int32 diff --git a/ocaml/vhd-tool/src/impl.ml b/ocaml/vhd-tool/src/impl.ml index d46fe0ac19d..52f2b3aa501 100644 --- a/ocaml/vhd-tool/src/impl.ml +++ b/ocaml/vhd-tool/src/impl.ml @@ -298,7 +298,7 @@ let stream_chunked _common c s prezeroed _ ?(progress = no_progress_bar) () = (fun (sector, work_done) x -> ( match x with | `Sectors data -> - let t = {Chunked.offset= Int64.(mul sector 512L); data} in + let t = Chunked.make ~sector ~size:512L data in Chunked.marshal header t ; c.Channels.really_write header >>= fun () -> c.Channels.really_write data >>= fun () -> @@ -326,7 +326,7 @@ let stream_chunked _common c s prezeroed _ ?(progress = no_progress_bar) () = p total_work ; (* Send the end-of-stream marker *) - Chunked.marshal header {Chunked.offset= 0L; data= Cstruct.create 0} ; + Chunked.(marshal header end_of_stream) ; c.Channels.really_write header >>= fun () -> return (Some total_work) let stream_raw _common c s prezeroed _ ?(progress = no_progress_bar) () = diff --git a/quality-gate.sh b/quality-gate.sh index 74d76499986..c1d122efd72 100755 --- a/quality-gate.sh +++ b/quality-gate.sh @@ -25,7 +25,7 @@ verify-cert () { } mli-files () { - N=500 + N=499 # do not count ml files from the tests in ocaml/{tests/perftest/quicktest} MLIS=$(git ls-files -- '**/*.mli' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) MLS=$(git ls-files -- '**/*.ml' | grep -vE "ocaml/tests|ocaml/perftest|ocaml/quicktest|ocaml/message-switch/core_test" | xargs -I {} sh -c "echo {} | cut -f 1 -d '.'" \;) From 42841695a671ba0cf5c729750ab804b8c851c138 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 11 Oct 2024 15:05:02 +0100 Subject: [PATCH 116/141] maintenance: remove data from vhd-tool's chunked datastructure The datastructure is meant to serialize the offset and length of a piece of disk, not its data. This also frontloads the possible conversion failure to the creation of the datastructure.
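With the data field gone, the structure carries only an offset and a length; for context, a consumer of this framing (a sketch only, not part of either patch; `really_read` and `handle_chunk` are assumed helpers) reads those two fields and then the payload:

```
(* Sketch only: consume interleaved header/data until the empty header. *)
let rec read_chunks really_read handle_chunk =
  let header = Cstruct.create Chunked.sizeof in
  really_read header ;
  if not (Chunked.is_last_chunk header) then (
    let offset = Chunked.get_offset header in
    let len = Int32.to_int (Chunked.get_len header) in
    let data = Cstruct.create len in
    really_read data ;
    handle_chunk ~offset data ;
    read_chunks really_read handle_chunk
  )
```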
Signed-off-by: Pau Ruiz Safont --- ocaml/vhd-tool/src/chunked.ml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/ocaml/vhd-tool/src/chunked.ml b/ocaml/vhd-tool/src/chunked.ml index 269dad8df5a..c95b727a499 100644 --- a/ocaml/vhd-tool/src/chunked.ml +++ b/ocaml/vhd-tool/src/chunked.ml @@ -18,16 +18,15 @@ let sizeof = sizeof_t type t = { offset: int64 (** offset on the physical disk *) - ; data: Cstruct.t (** data to write *) + ; len: int32 (** how much data to write *) } -let end_of_stream = {offset= 0L; data= Cstruct.create 0} +let end_of_stream = {offset= 0L; len= 0l} -let make ~sector ?(size = 512L) data = {offset= Int64.mul sector size; data} +let make ~sector ?(size = 512L) data = + {offset= Int64.mul sector size; len= Int32.of_int (Cstruct.length data)} -let marshal buf t = - set_t_offset buf t.offset ; - set_t_len buf (Int32.of_int (Cstruct.length t.data)) +let marshal buf t = set_t_offset buf t.offset ; set_t_len buf t.len let is_last_chunk buf = get_t_offset buf = 0L && get_t_len buf = 0l From 310429c6ed2204af660cfc525e49d019b7295f7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 11 Oct 2024 17:33:39 +0100 Subject: [PATCH 117/141] Revert "CP-48676: Don't check resuable pool session validity by default" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit c27b1d45b9a209ae922250a54b2a0a076af7a531. Signed-off-by: Edwin Török --- ocaml/xapi/xapi_globs.ml | 7 ------- ocaml/xapi/xapi_session.ml | 4 +--- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index c675e036451..38c9f88ac74 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1055,8 +1055,6 @@ let disable_webserver = ref false let reuse_pool_sessions = ref true -let validate_reusable_pool_session = ref false - let test_open = ref 0 let xapi_globs_spec = @@ -1627,11 +1625,6 @@ let other_options = , (fun () -> string_of_bool !reuse_pool_sessions) , "Enable the reuse of pool sessions" ) - ; ( "validate-reusable-pool-session" - , Arg.Set validate_reusable_pool_session - , (fun () -> string_of_bool !validate_reusable_pool_session) - , "Enable the reuse of pool sessions" - ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index bd981cb3692..abced81ca42 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -701,9 +701,7 @@ let login_no_password_common ~__context ~uname ~originator ~host ~pool with_lock reusable_pool_session_lock (fun () -> if !reusable_pool_session <> Ref.null - && ((not !Xapi_globs.validate_reusable_pool_session) - || is_valid_session !reusable_pool_session - ) + && is_valid_session !reusable_pool_session then !reusable_pool_session else From 76008ce63fff0a6c0c2558aa00423488ed5bd872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edwin=20T=C3=B6r=C3=B6k?= Date: Fri, 11 Oct 2024 17:33:44 +0100 Subject: [PATCH 118/141] Revert "CP-48676: Reuse pool sessions on slave logins." MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit af68185ba81b9817741992410b48f9e28e118e06. 
Signed-off-by: Edwin Török --- ocaml/xapi/xapi_globs.ml | 7 ---- ocaml/xapi/xapi_session.ml | 77 +++++++------------------------------- 2 files changed, 13 insertions(+), 71 deletions(-) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 38c9f88ac74..5407faf3bf4 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -1053,8 +1053,6 @@ let pool_recommendations_dir = ref "/etc/xapi.pool-recommendations.d" let disable_webserver = ref false -let reuse_pool_sessions = ref true - let test_open = ref 0 let xapi_globs_spec = @@ -1620,11 +1618,6 @@ let other_options = , (fun () -> !Uuidx.make_default == Uuidx.make_uuid_fast |> string_of_bool) , "Use PRNG based UUID generator instead of CSPRNG" ) - ; ( "reuse-pool-sessions" - , Arg.Set reuse_pool_sessions - , (fun () -> string_of_bool !reuse_pool_sessions) - , "Enable the reuse of pool sessions" - ) ] (* The options can be set with the variable xapiflags in /etc/sysconfig/xapi. diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index abced81ca42..7e77def1f43 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -398,29 +398,19 @@ let is_subject_suspended ~__context ~cache subject_identifier = debug "Subject identifier %s is suspended" subject_identifier ; (is_suspended, subject_name) -let reusable_pool_session = ref Ref.null - -let reusable_pool_session_lock = Mutex.create () - let destroy_db_session ~__context ~self = Context.with_tracing ~__context __FUNCTION__ @@ fun __context -> - with_lock reusable_pool_session_lock (fun () -> - if self <> !reusable_pool_session then ( - Xapi_event.on_session_deleted self ; - (* unregister from the event system *) - (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) - (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) - (* logged in. Instead, we print a non-invertible hash as the tracking id for the session id *) - (* see also task creation in context.ml *) - (* CP-982: create tracking id in log files to link username to actions *) - info "Session.destroy %s" (trackid self) ; - Rbac_audit.session_destroy ~__context ~session_id:self ; - (try Db.Session.destroy ~__context ~self with _ -> ()) ; - Rbac.destroy_session_permissions_tbl ~session_id:self - ) else - info "Skipping Session.destroy for reusable pool session %s" - (trackid self) - ) + Xapi_event.on_session_deleted self ; + (* unregister from the event system *) + (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) + (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) + (* logged in. 
Instead, we print a non-invertible hash as the tracking id for the session id *) + (* see also task creation in context.ml *) + (* CP-982: create tracking id in log files to link username to actions *) + info "Session.destroy %s" (trackid self) ; + Rbac_audit.session_destroy ~__context ~session_id:self ; + (try Db.Session.destroy ~__context ~self with _ -> ()) ; + Rbac.destroy_session_permissions_tbl ~session_id:self (* CP-703: ensure that activate sessions are invalidated in a bounded time *) (* in response to external authentication/directory services updates, such as *) @@ -620,8 +610,8 @@ let revalidate_all_sessions ~__context = debug "Unexpected exception while revalidating external sessions: %s" (ExnHelper.string_of_exn e) -let login_no_password_common_create_session ~__context ~uname ~originator ~host - ~pool ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name +let login_no_password_common ~__context ~uname ~originator ~host ~pool + ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name ~rbac_permissions ~db_ref ~client_certificate = Context.with_tracing ~originator ~__context __FUNCTION__ @@ fun __context -> let create_session () = @@ -671,47 +661,6 @@ let login_no_password_common_create_session ~__context ~uname ~originator ~host ignore (Client.Pool.get_all ~rpc ~session_id) ; session_id -let login_no_password_common ~__context ~uname ~originator ~host ~pool - ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name - ~rbac_permissions ~db_ref ~client_certificate = - Context.with_tracing ~originator ~__context __FUNCTION__ @@ fun __context -> - let is_valid_session session_id = - try - (* Call an API function to check the session is still valid *) - let rpc = Helpers.make_rpc ~__context in - ignore (Client.Pool.get_all ~rpc ~session_id) ; - true - with Api_errors.Server_error (err, _) -> - info "Invalid session: %s" err ; - false - in - let create_session () = - let new_session_id = - login_no_password_common_create_session ~__context ~uname ~originator - ~host ~pool ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name - ~rbac_permissions ~db_ref ~client_certificate - in - new_session_id - in - if - (originator, pool, is_local_superuser, uname) - = (xapi_internal_originator, true, true, None) - && !Xapi_globs.reuse_pool_sessions - then - with_lock reusable_pool_session_lock (fun () -> - if - !reusable_pool_session <> Ref.null - && is_valid_session !reusable_pool_session - then - !reusable_pool_session - else - let new_session_id = create_session () in - reusable_pool_session := new_session_id ; - new_session_id - ) - else - create_session () - (* XXX: only used internally by the code which grants the guest access to the API. 
Needs to be protected by a proper access control system *) let login_no_password ~__context ~uname ~host ~pool ~is_local_superuser ~subject From ab2acfc5192057e0bc1a87f2663c656bddf60987 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Fri, 11 Oct 2024 17:45:06 +0100 Subject: [PATCH 119/141] maintenance: remove unused code from stream_vdi The code to extract vdis from geneva / zurich releases has been unused for years Signed-off-by: Pau Ruiz Safont --- ocaml/xapi/stream_vdi.ml | 71 ---------------------------------------- 1 file changed, 71 deletions(-) diff --git a/ocaml/xapi/stream_vdi.ml b/ocaml/xapi/stream_vdi.ml index 3c27d158af3..477e84cc827 100644 --- a/ocaml/xapi/stream_vdi.ml +++ b/ocaml/xapi/stream_vdi.ml @@ -554,74 +554,3 @@ let recv_all refresh_session ifd (__context : Context.t) rpc session_id vsn let has_inline_checksums = vsn.Importexport.export_vsn > 0 in recv_all_vdi refresh_session ifd __context rpc session_id ~has_inline_checksums ~force - -(** Receive a set of VDIs split into chunks in a tar format created out of a Zurich/Geneva - exported VM. Each chunk has been independently compressed.*) -let recv_all_zurich refresh_session ifd (__context : Context.t) rpc session_id - prefix_vdis = - TaskHelper.set_cancellable ~__context ; - TaskHelper.set_description ~__context "Importing Virtual Machine" ; - let progress = new_progress_record __context prefix_vdis in - (* The next header in the sequence *) - let hdr = ref None in - let next () = - hdr := - try Some (Tar_unix.get_next_header ifd) with - | Tar.Header.End_of_stream -> - None - | e -> - raise e - in - next () ; - let recv_one ifd (__context : Context.t) (prefix, vdi_ref, _size) = - (* Open this VDI and stream in all the blocks. Return when hdr represents - a chunk which is not part of this VDI or the end of stream is reached. *) - with_open_vdi __context rpc session_id vdi_ref `RW [Unix.O_WRONLY] 0o644 - (fun ofd _ -> - let rec stream_from (last_suffix : string) = - match !hdr with - | Some hdr -> - refresh_session () ; - let file_name = hdr.Tar.Header.file_name in - let length = hdr.Tar.Header.file_size in - if Astring.String.is_prefix ~affix:prefix file_name then ( - let suffix = - String.sub file_name (String.length prefix) - (String.length file_name - String.length prefix) - in - if suffix <= last_suffix then ( - error - "Expected VDI chunk suffix to have increased under \ - lexicograpic ordering; last = %s; this = %s" - last_suffix suffix ; - raise (Failure "Invalid XVA file") - ) ; - debug "Decompressing %Ld bytes from %s\n" length file_name ; - Gzip.Default.decompress ofd (fun zcat_in -> - Tar_helpers.copy_n ifd zcat_in length - ) ; - Tar_helpers.skip ifd (Tar.Header.compute_zero_padding_length hdr) ; - (* XXX: this is totally wrong: *) - made_progress __context progress length ; - next () ; - stream_from suffix - ) - | None -> - (* Since we don't count uncompressed bytes we aren't sure if we've - really finished unfortunately. We can at least check to see if we - were cancelled... *) - TaskHelper.exn_if_cancelling ~__context ; - () - in - stream_from "" ; Unixext.fsync ofd - ) - in - ( try for_each_vdi __context (recv_one ifd __context) prefix_vdis - with Unix.Unix_error (Unix.EIO, _, _) -> - raise - (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"])) - ) ; - if !hdr <> None then ( - error "Failed to import XVA; some chunks were not processed." 
; raise (Failure "Some XVA data not processed") ) From 445ef24cd1ac99907d2cfa8cf64918640025ea07 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Mon, 14 Oct 2024 10:10:34 +0100 Subject: [PATCH 120/141] chore: update datamodel versions Signed-off-by: Pau Ruiz Safont --- ocaml/idl/datamodel_lifecycle.ml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ocaml/idl/datamodel_lifecycle.ml b/ocaml/idl/datamodel_lifecycle.ml index c16a4374342..60e46afb038 100644 --- a/ocaml/idl/datamodel_lifecycle.ml +++ b/ocaml/idl/datamodel_lifecycle.ml @@ -96,11 +96,11 @@ let prototyped_of_field = function | "pool", "telemetry_uuid" -> Some "23.9.0" | "pool", "ext_auth_cache_expiry" -> - Some "24.30.0-next" + Some "24.31.0" | "pool", "ext_auth_cache_size" -> - Some "24.30.0-next" + Some "24.31.0" | "pool", "ext_auth_cache_enabled" -> - Some "24.30.0-next" + Some "24.31.0" | "pool", "ext_auth_max_threads" -> Some "23.27.0" | "pool", "local_auth_max_threads" -> @@ -170,11 +170,11 @@ let prototyped_of_message = function | "pool", "get_guest_secureboot_readiness" -> Some "24.17.0" | "pool", "set_ext_auth_cache_expiry" -> - Some "24.30.0-next" + Some "24.31.0" | "pool", "set_ext_auth_cache_size" -> - Some "24.30.0-next" + Some "24.31.0" | "pool", "set_ext_auth_cache_enabled" -> - Some "24.30.0-next" + Some "24.31.0" | "pool", "set_ext_auth_max_threads" -> Some "23.27.0" | "pool", "set_local_auth_max_threads" -> From a2d9fbe39387fa9217590f6e3954cc75284b897a Mon Sep 17 00:00:00 2001 From: Robin Newton Date: Wed, 3 Jul 2024 09:06:45 +0100 Subject: [PATCH 121/141] IH-577 Implement v7 UUID generation * New function Uuidx.make_v7_uuid, with the idea being that ordering v7 UUIDs alphabetically will also order them by creation time (see the sketch below). This requires uuidm v0.9.9, as that contains the code for constructing a v7 UUID from a time and some random bytes. * There is a function for generating v7 UUIDs from known inputs, for the purpose of unit testing. Arguably it is pointless to have unit tests for third-party code, but the tests were written to test code that was submitted to uuidm only later, and I'm always loath to delete tests.
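A minimal sketch (not part of the patch) of the ordering property mentioned in the first bullet; it assumes the new Uuidx.make_v7_uuid from this series and uses uuidm's standard string rendering:

```
(* Illustrative only: v7 UUIDs generated in sequence sort in creation
   order, provided the two calls are at least ~245ns apart. *)
let () =
  let a = Uuidx.make_v7_uuid () in
  let b = Uuidx.make_v7_uuid () in
  assert (Uuidm.to_string a <= Uuidm.to_string b)
```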
Signed-off-by: Robin Newton --- ocaml/libs/uuid/dune | 7 +++- ocaml/libs/uuid/uuid_test.ml | 72 ++++++++++++++++++++++++++++++++++++ ocaml/libs/uuid/uuidx.ml | 39 +++++++++++++++---- ocaml/libs/uuid/uuidx.mli | 14 ++++++- 4 files changed, 122 insertions(+), 10 deletions(-) diff --git a/ocaml/libs/uuid/dune b/ocaml/libs/uuid/dune index 81c7edec804..8c3f9efa2f7 100644 --- a/ocaml/libs/uuid/dune +++ b/ocaml/libs/uuid/dune @@ -3,8 +3,13 @@ (public_name uuid) (modules uuidx) (libraries - unix (re_export uuidm) + mtime + mtime.clock.os + ptime + ptime.clock.os threads.posix + unix + (re_export uuidm) ) (wrapped false) ) diff --git a/ocaml/libs/uuid/uuid_test.ml b/ocaml/libs/uuid/uuid_test.ml index 127f10b5824..8d835360e75 100644 --- a/ocaml/libs/uuid/uuid_test.ml +++ b/ocaml/libs/uuid/uuid_test.ml @@ -25,6 +25,46 @@ let uuid_arrays = let non_uuid_arrays = [[|0|]; [|0; 1; 2; 3; 4; 5; 6; 7; 8; 9; 10; 11; 12; 13; 14|]] +let uuid_v7_times = + let of_ms ms = Int64.mul 1_000_000L (Int64.of_float ms) in + let power_of_2_ms n = Float.pow 2.0 (Float.of_int n) |> of_ms in + let zero = 0L in + let ms = 1_000_000L in + let ns = 1L in + (* Using RFC9562 "method 3" for representiong sub-millisecond fractions, + that smallest amount of time a v7 UUID can represent is 1 / 4096 ms, + which is (just more than) 244 nanoseconds *) + let tick = 245L in + let ( + ) = Int64.add in + let ( - ) = Int64.sub in + [ + (zero, "00000000-0000-7000-8000-000000000000") + ; (tick, "00000000-0000-7001-8000-000000000000") + ; (ms, "00000000-0001-7000-8000-000000000000") + ; (ms - ns, "00000000-0000-7fff-8000-000000000000") + (* Test a wide range of dates - however, we can't express dates of + beyond epoch + (2^64 - 1) nanoseconds, which is about approximately + epoch + 2^44 milliseconds - some point in the 26th century *) + ; (power_of_2_ms 05, "00000000-0020-7000-8000-000000000000") + ; (power_of_2_ms 10, "00000000-0400-7000-8000-000000000000") + ; (power_of_2_ms 15, "00000000-8000-7000-8000-000000000000") + ; (power_of_2_ms 20, "00000010-0000-7000-8000-000000000000") + ; (power_of_2_ms 25, "00000200-0000-7000-8000-000000000000") + ; (power_of_2_ms 30, "00004000-0000-7000-8000-000000000000") + ; (power_of_2_ms 35, "00080000-0000-7000-8000-000000000000") + ; (power_of_2_ms 40, "01000000-0000-7000-8000-000000000000") + ; (power_of_2_ms 44, "10000000-0000-7000-8000-000000000000") + ; (power_of_2_ms 44 - ns, "0fffffff-ffff-7fff-8000-000000000000") + ; (power_of_2_ms 44 + tick, "10000000-0000-7001-8000-000000000000") + ] + +let uuid_v7_bytes = + [ + (1L, "00000000-0000-7000-8000-000000000001") + ; (-1L, "00000000-0000-7000-bfff-ffffffffffff") + ; (0x1234_5678_9abc_def0L, "00000000-0000-7000-9234-56789abcdef0") + ] + type resource = [`Generic] let uuid_testable : (module Alcotest.TESTABLE with type t = resource Uuidx.t) = @@ -51,6 +91,36 @@ let roundtrip_tests testing_uuid = ; ("Roundtrip array conversion", `Quick, test_array) ] +let uuid_v7_time_tests (t, expected_as_string) = + let expected = + match Uuidx.of_string expected_as_string with + | Some uuid -> + uuid + | None -> + Alcotest.fail + (Printf.sprintf "Couldn't convert to UUID: %s" expected_as_string) + in + let test () = + let result = Uuidx.make_v7_uuid_from_parts t 0L in + Alcotest.(check @@ uuid_testable) "make UUIDv7" expected result + in + (expected_as_string, [("Make UUIDv7 from time", `Quick, test)]) + +let uuid_v7_bytes_tests (rand_b, expected_as_string) = + let expected = + match Uuidx.of_string expected_as_string with + | Some uuid -> + uuid + | None -> + 
Alcotest.fail + (Printf.sprintf "Couldn't convert to UUID: %s" expected_as_string) + in + let test () = + let result = Uuidx.make_v7_uuid_from_parts 0L rand_b in + Alcotest.(check @@ uuid_testable) "make UUIDv7" expected result + in + (expected_as_string, [("Make UUIDv7 from bytes", `Quick, test)]) + let string_roundtrip_tests testing_string = let testing_uuid = match Uuidx.of_string testing_string with @@ -111,6 +181,8 @@ let regression_tests = ; List.map array_roundtrip_tests uuid_arrays ; List.map invalid_string_tests non_uuid_strings ; List.map invalid_array_tests non_uuid_arrays + ; List.map uuid_v7_time_tests uuid_v7_times + ; List.map uuid_v7_bytes_tests uuid_v7_bytes ] let () = Alcotest.run "Uuid" regression_tests diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index 8fc44a47edd..65392ef4485 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -131,21 +131,46 @@ let read_bytes dev n = let make_uuid_urnd () = of_bytes (read_bytes dev_urandom_fd 16) |> Option.get -(** Use non-CSPRNG by default, for CSPRNG see {!val:make_uuid_urnd} *) -let make_uuid_fast = - let uuid_state = Random.State.make_self_init () in +(* State for random number generation. Random.State.t isn't thread safe, so + only use this via with_non_csprng_state, which takes care of this. +*) +let rstate = Random.State.make_self_init () + +let rstate_m = Mutex.create () + +let with_non_csprng_state = (* On OCaml 5 we could use Random.State.split instead, and on OCaml 4 the mutex may not be strictly needed *) - let m = Mutex.create () in - let finally () = Mutex.unlock m in - let gen = Uuidm.v4_gen uuid_state in - fun () -> Mutex.lock m ; Fun.protect ~finally gen + let finally () = Mutex.unlock rstate_m in + fun f -> + Mutex.lock rstate_m ; + Fun.protect ~finally (f rstate) + +(** Use non-CSPRNG by default, for CSPRNG see {!val:make_uuid_urnd} *) +let make_uuid_fast () = with_non_csprng_state Uuidm.v4_gen let make_default = ref make_uuid_urnd let make () = !make_default () +let make_v7_uuid_from_parts time_ns rand_b = Uuidm.v7_ns ~time_ns ~rand_b + +let rand64 () = + with_non_csprng_state (fun rstate () -> Random.State.bits64 rstate) + +let now_ns = + let start = Mtime_clock.counter () in + let t0 = + let d, ps = Ptime_clock.now () |> Ptime.to_span |> Ptime.Span.to_d_ps in + Int64.(add (mul (of_int d) 86_400_000_000_000L) (div ps 1000L)) + in + fun () -> + let since_t0 = Mtime_clock.count start |> Mtime.Span.to_uint64_ns in + Int64.add t0 since_t0 + +let make_v7_uuid () = make_v7_uuid_from_parts (now_ns ()) (rand64 ()) + type cookie = string let make_cookie () = diff --git a/ocaml/libs/uuid/uuidx.mli b/ocaml/libs/uuid/uuidx.mli index ebc9f2e1611..1e1ebc3251c 100644 --- a/ocaml/libs/uuid/uuidx.mli +++ b/ocaml/libs/uuid/uuidx.mli @@ -115,8 +115,7 @@ type all = [without_secret | secret] type 'a t = Uuidm.t constraint 'a = [< all] val null : [< not_secret] t -(** A null UUID, as if such a thing actually existed. It turns out to be - useful though. *) +(** A null UUID, as defined in RFC 9562 5.9. *) val make : unit -> [< not_secret] t (** Create a fresh UUID *) @@ -130,6 +129,17 @@ val make_uuid_fast : unit -> [< not_secret] t Don't use this to generate secrets, see {!val:make_uuid_urnd} for that instead. *) +val make_v7_uuid_from_parts : int64 -> int64 -> [< not_secret] t +(** For testing only: create a v7 UUID, as defined in RFC 9562 5.7 *) + +val make_v7_uuid : unit -> [< not_secret] t +(** Create a fresh v7 UUID, as defined in RFC 9562 5.7. 
This incorporates a + POSIX timestamp, such that the alphabetic of any two such UUIDs will match + the timestamp order - provided that they are at least 245 nanoseconds + apart. Note that in order to ensure that the timestamps used are + monotonic, operating time adjustments are ignored and hence timestamps + only approximate system time. *) + val pp : Format.formatter -> [< not_secret] t -> unit val equal : 'a t -> 'a t -> bool From 95073698f927284c3bf276786e339b4352616138 Mon Sep 17 00:00:00 2001 From: Alex Brett Date: Wed, 16 Oct 2024 12:33:05 +0000 Subject: [PATCH 122/141] Update wire-protocol.md to have working Python3 code examples The document was updated in f785993f244c0b2234f68338a3478d908bc4a9f4 to use Python 3, however the examples were left unchanged. In Python 3, `xmlrpclib` does not exist, it has been replaced by the `xmlrpc` module (specifically in this case `xmlrpclib.Server` has been replaced by `xmlrpc.client.ServerProxy`). Additionally, the `python-jsonrpc` package has not been ported to Python 3, and as such is unavailable. There is no direct replacement, however a combination of the `jsonrpcclient` package and the `requests` packages provide a reasonable solution. This commit updates the examples appropriately so they function in Python 3. Signed-off-by: Alex Brett --- ocaml/doc/wire-protocol.md | 71 +++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/ocaml/doc/wire-protocol.md b/ocaml/doc/wire-protocol.md index 155a27b23e0..e266afa3a6e 100644 --- a/ocaml/doc/wire-protocol.md +++ b/ocaml/doc/wire-protocol.md @@ -469,12 +469,21 @@ $ python3 ### Using the XML-RPC Protocol -Import the library `xmlrpclib` and create a +Import the library `xmlrpc.client` and create a python object referencing the remote server as shown below: ```python ->>> import xmlrpclib ->>> xen = xmlrpclib.Server("https://localhost:443") +>>> import xmlrpc.client +>>> xen = xmlrpc.client.ServerProxy("https://localhost:443") +``` + +Note that you may need to disable SSL certificate validation to establish the +connection, this can be done as follows: + +```python +>>> import ssl +>>> ctx = ssl._create_unverified_context() +>>> xen = xmlrpc.client.ServerProxy("https://localhost:443", context=ctx) ``` Acquire a session reference by logging in with a username and password; the @@ -555,27 +564,38 @@ To retrieve all the VM records in a single call: ### Using the JSON-RPC Protocol -For this example we are making use of the package `python-jsonrpc` due to its -simplicity, although other packages can also be used. +For this example we are making use of the package `jsonrpcclient` and the +`requests` library due to their simplicity, although other packages can also be +used. -First, import the library `pyjsonrpc` and create the object referencing the -remote server as follows: +First, import the `requests` and `jsonrpcclient` libraries: ```python ->>> import pyjsonrpc ->>> client = pyjsonrpc.HttpClient(url = "https://localhost/jsonrpc:443") +>>> import requests +>>> import jsonrpcclient ``` -Acquire a session reference by logging in with a username and password; the -library `pyjsonrpc` returns the response's `result` member, which is the session -reference: +Now we construct a utility method to make using these libraries easier: + +```python +>>> def jsonrpccall(method, params): +... r = requests.post("https://localhost:443/jsonrpc", +... json=jsonrpcclient.request(method, params=params), +... verify=False) +... p = jsonrpcclient.parse(r.json()) +... 
if isinstance(p, jsonrpcclient.Ok): +... return p.result +... raise Exception(p.message, p.data) +``` + +Acquire a session reference by logging in with a username and password: ```python ->>> session = client.call("session.login_with_password", -... "user", "passwd", "version", "originator") +>>> session = jsonrpccall("session.login_with_password", +... ("user", "password", "version", "originator")) ``` -`pyjsonrpc` uses the JSON-RPC protocol v2.0, so this is what the serialized +`jsonrpcclient` uses the JSON-RPC protocol v2.0, so this is what the serialized request looks like: ```json @@ -591,7 +611,7 @@ Next, the user may acquire a list of all the VMs known to the system (note the call takes the session reference as the only parameter): ```python ->>> all_vms = client.call("VM.get_all", session) +>>> all_vms = jsonrpccall("VM.get_all", (session,)) >>> all_vms ['OpaqueRef:1', 'OpaqueRef:2', 'OpaqueRef:3', 'OpaqueRef:4' ] ``` @@ -603,22 +623,19 @@ find the subset of template VMs using a command like the following: ```python >>> all_templates = filter( -... lambda x: client.call("VM.get_is_a_template", session, x), - all_vms) +... lambda x: jsonrpccall("VM.get_is_a_template", (session, x)), +... all_vms) ``` Once a reference to a VM has been acquired, a lifecycle operation may be invoked: ```python ->>> from pyjsonrpc import JsonRpcError >>> try: -... client.call("VM.start", session, all_templates[0], False, False) -... except JsonRpcError as e: -... e.message -... e.data +... jsonrpccall("VM.start", (session, next(all_templates), False, False)) +... except Exception as e: +... e ... -'VM_IS_TEMPLATE' -[ 'OpaqueRef:1', 'start' ] +Exception('VM_IS_TEMPLATE', ['OpaqueRef:1', 'start']) ``` In this case the `start` message has been rejected because the VM is @@ -629,7 +646,7 @@ Rather than querying fields individually, whole _records_ may be returned at onc To retrieve the record of a single object as a python dictionary: ```python ->>> record = client.call("VM.get_record", session, all_templates[0]) +>>> record = jsonrpccall("VM.get_record", (session, next(all_templates))) >>> record['power_state'] 'Halted' >>> record['name_label'] @@ -639,7 +656,7 @@ To retrieve the record of a single object as a python dictionary: To retrieve all the VM records in a single call: ```python ->>> records = client.call("VM.get_all_records", session) +>>> records = jsonrpccall("VM.get_all_records", (session,)) >>> records.keys() ['OpaqueRef:1', 'OpaqueRef:2', 'OpaqueRef:3', 'OpaqueRef:4' ] >>> records['OpaqueRef:1']['name_label'] From 00c13da9b463f5b70a9ad6d8af5262edc1482aee Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Mon, 14 Oct 2024 21:58:10 +0100 Subject: [PATCH 123/141] Added WLB_VM_RELOCATION to the list of recognized messages. 
Signed-off-by: Konstantina Chremmou --- ocaml/sdk-gen/csharp/templates/Message2.mustache | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ocaml/sdk-gen/csharp/templates/Message2.mustache b/ocaml/sdk-gen/csharp/templates/Message2.mustache index 4661d815146..3dfe4f4503e 100644 --- a/ocaml/sdk-gen/csharp/templates/Message2.mustache +++ b/ocaml/sdk-gen/csharp/templates/Message2.mustache @@ -43,6 +43,7 @@ namespace XenAPI LEAF_COALESCE_COMPLETED, LEAF_COALESCE_FAILED, POST_ATTACH_SCAN_FAILED, + WLB_VM_RELOCATION, {{#message_types}} {{{message_type}}}, {{/message_types}} @@ -73,6 +74,8 @@ namespace XenAPI return MessageType.LEAF_COALESCE_FAILED; case "POST_ATTACH_SCAN_FAILED": return MessageType.POST_ATTACH_SCAN_FAILED; + case "WLB_VM_RELOCATION": + return MessageType.WLB_VM_RELOCATION; {{#message_types}} case "{{{message_type}}}": return MessageType.{{{message_type}}}; From 3a727d29ca9047c3f6631018ccc61b4580d3034e Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Wed, 16 Oct 2024 16:11:58 +0100 Subject: [PATCH 124/141] Python command correction. Signed-off-by: Konstantina Chremmou --- ocaml/doc/wire-protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/doc/wire-protocol.md b/ocaml/doc/wire-protocol.md index e266afa3a6e..26b911bd2c7 100644 --- a/ocaml/doc/wire-protocol.md +++ b/ocaml/doc/wire-protocol.md @@ -556,7 +556,7 @@ To retrieve all the VM records in a single call: ```python >>> records = xen.VM.get_all_records(session)['Value'] ->>> records.keys() +>>> list(records.keys()) ['OpaqueRef:1', 'OpaqueRef:2', 'OpaqueRef:3', 'OpaqueRef:4' ] >>> records['OpaqueRef:1']['name_label'] 'Red Hat Enterprise Linux 7' From 80528e073fee44f7e76b1962ba0c35d59425462d Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 09:59:09 +0000 Subject: [PATCH 125/141] Remove unused Http_svr.Chunked module Signed-off-by: Rob Hoes --- ocaml/libs/http-lib/http_svr.ml | 72 -------------------------------- ocaml/libs/http-lib/http_svr.mli | 10 ----- 2 files changed, 82 deletions(-) diff --git a/ocaml/libs/http-lib/http_svr.ml b/ocaml/libs/http-lib/http_svr.ml index e04520d8567..d033fb7805f 100644 --- a/ocaml/libs/http-lib/http_svr.ml +++ b/ocaml/libs/http-lib/http_svr.ml @@ -713,78 +713,6 @@ let read_body ?limit req bio = else Buf_io.really_input_buf ~timeout:Buf_io.infinite_timeout bio length -module Chunked = struct - type t = { - mutable current_size: int - ; mutable current_offset: int - ; mutable read_headers: bool - ; bufio: Buf_io.t - } - - let of_bufio bufio = - {current_size= 0; current_offset= 0; bufio; read_headers= true} - - let rec read chunk size = - if chunk.read_headers = true then ( - (* first get the size, then get the data requested *) - let size = - Buf_io.input_line chunk.bufio - |> Bytes.to_string - |> String.trim - |> Printf.sprintf "0x%s" - |> int_of_string - in - chunk.current_size <- size ; - chunk.current_offset <- 0 ; - chunk.read_headers <- false - ) ; - (* read as many bytes from this chunk as possible *) - if chunk.current_size = 0 then - "" - else - let bytes_to_read = - min size (chunk.current_size - chunk.current_offset) - in - if bytes_to_read = 0 then - "" - else - let data = Bytes.make bytes_to_read '\000' in - Buf_io.really_input chunk.bufio data 0 bytes_to_read ; - (* now update the data structure: *) - if chunk.current_offset + bytes_to_read = chunk.current_size then ( - (* finished a chunk: get rid of the CRLF *) - let blank = Bytes.of_string "\000\000" in - Buf_io.really_input chunk.bufio blank 0 2 ; - if Bytes.to_string blank <> 
"\r\n" then - failwith "chunked encoding error" ; - chunk.read_headers <- true - ) else (* partway through a chunk. *) - chunk.current_offset <- chunk.current_offset + bytes_to_read ; - Bytes.unsafe_to_string data ^ read chunk (size - bytes_to_read) -end - -let read_chunked_encoding _req bio = - let rec next () = - let size = - Buf_io.input_line bio - (* Strictly speaking need to kill anything past an ';' if present *) - |> Bytes.to_string - |> String.trim - |> Printf.sprintf "0x%s" - |> int_of_string - in - if size = 0 then - Http.End - else - let chunk = Bytes.make size '\000' in - Buf_io.really_input bio chunk 0 size ; - (* Then get rid of the CRLF *) - let blank = Bytes.of_string "\000\000" in - Buf_io.really_input bio blank 0 2 ; - Http.Item (chunk, next) - in - next () - (* Helpers to determine the client of a call *) type protocol = Https | Http diff --git a/ocaml/libs/http-lib/http_svr.mli b/ocaml/libs/http-lib/http_svr.mli index d85ad28a2ec..52f6719dd0a 100644 --- a/ocaml/libs/http-lib/http_svr.mli +++ b/ocaml/libs/http-lib/http_svr.mli @@ -74,16 +74,6 @@ exception Socket_not_found val stop : socket -> unit -module Chunked : sig - type t - - val of_bufio : Buf_io.t -> t - - val read : t -> int -> string -end - -val read_chunked_encoding : Http.Request.t -> Buf_io.t -> bytes Http.ll - (* The rest of this interface needs to be deleted and replaced with Http.Response.* *) val response_fct : From 11132993a52f43eb9a562ab1481f0cdefb6dee1e Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Thu, 17 Oct 2024 12:11:56 +0100 Subject: [PATCH 126/141] chore: Fix some grammatical errors in cluster alerts Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_cluster_helpers.ml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi/xapi_cluster_helpers.ml b/ocaml/xapi/xapi_cluster_helpers.ml index f7ea78eab9d..b46389f8a86 100644 --- a/ocaml/xapi/xapi_cluster_helpers.ml +++ b/ocaml/xapi/xapi_cluster_helpers.ml @@ -126,7 +126,7 @@ let maybe_generate_alert ~__context ~num_hosts ~hosts_left ~hosts_joined ~quorum let body = Printf.sprintf "Host %s has joined the cluster, there are now %d host(s) in \ - cluster and %d hosts are required to form a quorum" + cluster and %d host(s) are required to form a quorum" host_name num_hosts quorum in let name, priority = Api_messages.cluster_host_joining in @@ -135,7 +135,7 @@ let maybe_generate_alert ~__context ~num_hosts ~hosts_left ~hosts_joined ~quorum let body = Printf.sprintf "Host %s has left the cluster, there are now %d host(s) in \ - cluster and %d hosts are required to form a quorum" + cluster and %d host(s) are required to form a quorum" host_name num_hosts quorum in let name, priority = Api_messages.cluster_host_leaving in @@ -157,8 +157,8 @@ let maybe_generate_alert ~__context ~num_hosts ~hosts_left ~hosts_joined ~quorum let name, priority = Api_messages.cluster_quorum_approaching_lost in let body = Printf.sprintf - "The cluster is losing quorum: current %d hosts, need %d hosts for a \ - quorum" + "The cluster is losing quorum: currently %d host(s), need %d host(s) \ + for a quorum" num_hosts quorum in Helpers.call_api_functions ~__context (fun rpc session_id -> From 8465e1bb961fd5eb4097727de9154f9ae6b8877f Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 10:24:10 +0000 Subject: [PATCH 127/141] buf_io: remove unused function input_line Signed-off-by: Rob Hoes --- ocaml/libs/http-lib/buf_io.ml | 44 --------------------------------- ocaml/libs/http-lib/buf_io.mli | 10 -------- ocaml/libs/http-lib/http_svr.ml | 3 --- 3 
files changed, 57 deletions(-) diff --git a/ocaml/libs/http-lib/buf_io.ml b/ocaml/libs/http-lib/buf_io.ml index 3b7ca1ebd14..12da51cb22f 100644 --- a/ocaml/libs/http-lib/buf_io.ml +++ b/ocaml/libs/http-lib/buf_io.ml @@ -15,18 +15,10 @@ type t = {fd: Unix.file_descr; buf: bytes; mutable cur: int; mutable max: int} -type err = - | (* Line input is > 1024 chars *) - Too_long - | (* EOF found, with no newline *) - No_newline - exception Timeout (* Waited too long for data to appear *) exception Eof -exception Line of err (* Raised by input_line only *) - let infinite_timeout = -1. let of_fd fd = @@ -61,15 +53,6 @@ let shift ic = ic.cur <- 0 ) -(* Check to see if we've got a line (ending in \n) in the buffer *) -let got_line ic = - try - let n = Bytes.index_from ic.buf ic.cur '\n' in - if n >= ic.max then -1 else n - with Not_found -> -1 - -let is_full ic = ic.cur = 0 && ic.max = Bytes.length ic.buf - (* Fill the buffer with everything that's ready to be read (up to the limit of the buffer *) let fill_buf ~buffered ic timeout = let buf_size = Bytes.length ic.buf in @@ -104,33 +87,6 @@ let fill_buf ~buffered ic timeout = ignore (fill_no_exc (Some 1e-6) tofillsz) ) -(** Input one line terminated by \n *) -let input_line ?(timeout = 60.0) ic = - (* See if we've already input a line *) - let n = got_line ic in - let rec get_line () = - fill_buf ~buffered:false ic timeout ; - let n = got_line ic in - if n < 0 && not (is_full ic) then - get_line () - else - n - in - let n = if n < 0 then get_line () else n in - (* Still no \n? then either we've run out of data, or we've run out of space *) - if n < 0 then - if ic.max = Bytes.length ic.buf then - raise (Line Too_long) - else ( - Printf.printf "got: '%s'\n" - (Bytes.sub_string ic.buf ic.cur (ic.max - ic.cur)) ; - raise (Line No_newline) - ) ; - (* Return the line, stripping the newline *) - let result = Bytes.sub ic.buf ic.cur (n - ic.cur) in - ic.cur <- n + 1 ; - result - (** Input 'len' characters from ic and put them into the bytestring 'b' starting from 'from' *) let rec really_input ?(timeout = 15.0) ic b from len = if len = 0 then diff --git a/ocaml/libs/http-lib/buf_io.mli b/ocaml/libs/http-lib/buf_io.mli index c6dafb2840b..fc76f1932e2 100644 --- a/ocaml/libs/http-lib/buf_io.mli +++ b/ocaml/libs/http-lib/buf_io.mli @@ -24,9 +24,6 @@ val infinite_timeout : float (** {2 Input functions} *) -val input_line : ?timeout:float -> t -> bytes -(** Input one line terminated by \n *) - val really_input : ?timeout:float -> t -> bytes -> int -> int -> unit (** Input 'len' characters from ic and put them into the string 'str' starting from 'from' *) @@ -39,13 +36,6 @@ exception Timeout exception Eof -(** Raised by input_line only *) -type err = - | Too_long (** Line input is > 1024 chars *) - | No_newline (** EOF found, with no newline *) - -exception Line of err - (** {2 Internal functions} *) val is_buffer_empty : t -> bool diff --git a/ocaml/libs/http-lib/http_svr.ml b/ocaml/libs/http-lib/http_svr.ml index d033fb7805f..738ddcb8d69 100644 --- a/ocaml/libs/http-lib/http_svr.ml +++ b/ocaml/libs/http-lib/http_svr.ml @@ -483,9 +483,6 @@ let request_of_bio ?proxy_seen ~read_timeout ~total_timeout ~max_length ic = | Buf_io.Eof -> () (* Connection terminated *) - | Buf_io.Line _ -> - response_internal_error e ss - ~extra:"One of the header lines was too long." 
(* Generic errors thrown during parsing *) | End_of_file -> () From 5770f42336c2912ee2eb4d8c18a9654eb06b2e5c Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Thu, 17 Oct 2024 17:08:22 +0100 Subject: [PATCH 128/141] Access pvsproxy via a socket in /run The pvsproxy socket is available in both /opt/ and /run. Since /run is a more sensible location for a socket, use that one to allow the other to be removed in the future. Signed-off-by: Ross Lagerwall --- ocaml/networkd/bin/network_server.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/networkd/bin/network_server.ml b/ocaml/networkd/bin/network_server.ml index d0b21a125d6..289ef665932 100644 --- a/ocaml/networkd/bin/network_server.ml +++ b/ocaml/networkd/bin/network_server.ml @@ -1474,7 +1474,7 @@ end module PVS_proxy = struct open S.PVS_proxy - let path = ref "/opt/citrix/pvsproxy/socket/pvsproxy" + let path = ref "/run/pvsproxy" let do_call call = try Jsonrpc_client.with_rpc ~path:!path ~call () From c02f6530a9e85fa6e7ff23c7fad6739a5da7b28b Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 13:38:51 +0000 Subject: [PATCH 129/141] http-svr: change request_of_bio(_exn) to read_request(_exn) The function is now passed a plain fd rather than a Buf_io.t ("bio") value, as it does not actually use the buffered channel and just read from the fd directly (using `Http.read_http_request_header`). There used to be an older version if `request_from_bio` that had an option to read requests in a different way and that did use Buf_io. This was called the "slow path" and was removed in bc2ff459 in favour of the current "fast path". This further clean-up opportunity was missed at that time. Signed-off-by: Rob Hoes --- ocaml/libs/http-lib/http_svr.ml | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/ocaml/libs/http-lib/http_svr.ml b/ocaml/libs/http-lib/http_svr.ml index 738ddcb8d69..1a846ea629a 100644 --- a/ocaml/libs/http-lib/http_svr.ml +++ b/ocaml/libs/http-lib/http_svr.ml @@ -346,11 +346,9 @@ let escape uri = exception Generic_error of string -(** [request_of_bio_exn ic] reads a single Http.req from [ic] and returns it. On error +(** [read_request_exn fd] reads a single Http.req from [fd] and returns it. On error it simply throws an exception and doesn't touch the output stream. *) -let request_of_bio_exn ~proxy_seen ~read_timeout ~total_timeout ~max_length bio - = - let fd = Buf_io.fd_of bio in +let read_request_exn ~proxy_seen ~read_timeout ~total_timeout ~max_length fd = let frame, headers, proxy' = Http.read_http_request_header ~read_timeout ~total_timeout ~max_length fd in @@ -440,9 +438,9 @@ let request_of_bio_exn ~proxy_seen ~read_timeout ~total_timeout ~max_length bio in (request, proxy) -(** [request_of_bio ic] returns [Some req] read from [ic], or [None]. If [None] it will have +(** [read_request fd] returns [Some req] read from [fd], or [None]. If [None] it will have already sent back a suitable error code and response to the client. 
*) -let request_of_bio ?proxy_seen ~read_timeout ~total_timeout ~max_length ic = +let read_request ?proxy_seen ~read_timeout ~total_timeout ~max_length fd = try let tracer = Tracing.Tracer.get_tracer ~name:"http_tracer" in let loop_span = @@ -453,7 +451,7 @@ let request_of_bio ?proxy_seen ~read_timeout ~total_timeout ~max_length ic = None in let r, proxy = - request_of_bio_exn ~proxy_seen ~read_timeout ~total_timeout ~max_length ic + read_request_exn ~proxy_seen ~read_timeout ~total_timeout ~max_length fd in let parent_span = Http.Request.traceparent_of r in let loop_span = @@ -470,35 +468,29 @@ let request_of_bio ?proxy_seen ~read_timeout ~total_timeout ~max_length ic = with e -> D.warn "%s (%s)" (Printexc.to_string e) __LOC__ ; best_effort (fun () -> - let ss = Buf_io.fd_of ic in match e with (* Specific errors thrown during parsing *) | Http.Http_parse_failure -> - response_internal_error e ss + response_internal_error e fd ~extra:"The HTTP headers could not be parsed." ; debug "Error parsing HTTP headers" - | Buf_io.Timeout -> - () - (* Idle connection closed. NB infinite timeout used when headers are being read *) - | Buf_io.Eof -> - () (* Connection terminated *) (* Generic errors thrown during parsing *) | End_of_file -> () | Unix.Unix_error (Unix.EAGAIN, _, _) | Http.Timeout -> - response_request_timeout ss + response_request_timeout fd | Http.Too_large -> - response_request_header_fields_too_large ss + response_request_header_fields_too_large fd (* Premature termination of connection! *) | Unix.Unix_error (a, b, c) -> - response_internal_error e ss + response_internal_error e fd ~extra: (Printf.sprintf "Got UNIX error: %s %s %s" (Unix.error_message a) b c ) | exc -> - response_internal_error exc ss + response_internal_error exc fd ~extra:(escape (Printexc.to_string exc)) ; log_backtrace () ) ; @@ -571,7 +563,6 @@ let handle_connection ~header_read_timeout ~header_total_timeout (Unix.string_of_inet_addr addr) port ) ; - let ic = Buf_io.of_fd ss in (* For HTTPS requests, a PROXY header is sent by stunnel right at the beginning of of its connection to the server, before HTTP requests are transferred, and just once per connection. To allow for the PROXY metadata (including e.g. the @@ -580,8 +571,8 @@ let handle_connection ~header_read_timeout ~header_total_timeout let rec loop ~read_timeout ~total_timeout proxy_seen = (* 1. we must successfully parse a request *) let req, proxy = - request_of_bio ?proxy_seen ~read_timeout ~total_timeout - ~max_length:max_header_length ic + read_request ?proxy_seen ~read_timeout ~total_timeout + ~max_length:max_header_length ss in (* 2. now we attempt to process the request *) From a49ae63b1c9fc8f2f2a463a8829201579acba932 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 14:13:10 +0000 Subject: [PATCH 130/141] xmlrpc_client: remove us of Buf_io The function `check_reusable_inner` used Buf_io to read a fixed-length HTTP response and then discarded the buffer. This is functionally the same as using `Unixext.really_read_string`, so do that instead. 
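A rough sketch of the equivalence, using hypothetical helper names
(`read_fixed_body_buffered`, `read_fixed_body_direct`) and the `Buf_io` and
`Unixext` calls that appear elsewhere in this series:

```ocaml
(* Sketch only: read exactly [len] bytes of a response body from [fd]. *)

(* Before: allocate a buffered reader that is discarded straight afterwards *)
let read_fixed_body_buffered fd len =
  let tmp = Bytes.make len 'X' in
  let buf = Buf_io.of_fd fd in
  Buf_io.really_input buf tmp 0 len ;
  Bytes.unsafe_to_string tmp

(* After: read directly from the file descriptor *)
let read_fixed_body_direct fd len =
  Xapi_stdext_unix.Unixext.really_read_string fd len
```

Either way the caller ends up with a plain string of the requested length; the
buffered path only added an extra allocation.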
Signed-off-by: Rob Hoes --- ocaml/libs/http-lib/xmlrpc_client.ml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ocaml/libs/http-lib/xmlrpc_client.ml b/ocaml/libs/http-lib/xmlrpc_client.ml index a93bda5e888..5bf43b0268c 100644 --- a/ocaml/libs/http-lib/xmlrpc_client.ml +++ b/ocaml/libs/http-lib/xmlrpc_client.ml @@ -87,10 +87,7 @@ let check_reusable_inner (x : Unixfd.t) = match response.Http.Response.content_length with | Some len -> ( let len = Int64.to_int len in - let tmp = Bytes.make len 'X' in - let buf = Buf_io.of_fd Unixfd.(!x) in - Buf_io.really_input buf tmp 0 len ; - let tmp = Bytes.unsafe_to_string tmp in + let tmp = Unixext.really_read_string Unixfd.(!x) len in match XMLRPC.From.methodResponse (Xml.parse_string tmp) with | XMLRPC.Failure ("MESSAGE_METHOD_UNKNOWN", [param]) when param = msg_func -> From 8a829ec8e1d5bba2de0f780d9119edb138a9eb44 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 14:30:15 +0000 Subject: [PATCH 131/141] http-svr: remove read from Buf_io in read_body At this point, the only function in the entire code base that read from a Buf_io.t is `Http_svr.read_body` (apart from a test for Buf_io). However, it only does so if the buffer is not empty and falls back to reading directly from the fd is not. And since nothing else reads from a Buf_io, the buffer is always empty... Signed-off-by: Rob Hoes --- ocaml/libs/http-lib/http_svr.ml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ocaml/libs/http-lib/http_svr.ml b/ocaml/libs/http-lib/http_svr.ml index 1a846ea629a..d75726f184c 100644 --- a/ocaml/libs/http-lib/http_svr.ml +++ b/ocaml/libs/http-lib/http_svr.ml @@ -696,10 +696,7 @@ let read_body ?limit req bio = if length > l then raise Client_requested_size_over_limit ) limit ; - if Buf_io.is_buffer_empty bio then - Unixext.really_read_string (Buf_io.fd_of bio) length - else - Buf_io.really_input_buf ~timeout:Buf_io.infinite_timeout bio length + Unixext.really_read_string (Buf_io.fd_of bio) length (* Helpers to determine the client of a call *) From 8d2bd136288868ebdaebf03efdf9a5b4b5a4fd37 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 15:24:14 +0000 Subject: [PATCH 132/141] xapi_http: unify cases in add_handler The main difference between the BufIO and FdIO cases was that the former calls `assert_credentials_ok` with the `callback` in its `~fn` parameter, while the latter executed the `callback` directly after the credentials check. The function `assert_credentials_ok` either calls `fn` or raises an exception. Well, nearly... It did not actually call `fn` in the unix socket case, where checks are bypassed. This looks unintended and this patch corrects it. This only affects the following handlers in xapi, which use BufIO and require RBAC checks: post_remote_db_access, post_remote_db_access_v2, get_wlb_report, get_wlb_diagnostics, get_audit_log. I guess those were simply never used on the unix socket. The other thing that happens when using `~fn` is that the function `Rbac_audit.allowed_post_fn_ok` is called after `~fn`. This writes an "ALLOWED OK" line to the audit log. I don't see a reason not to do the same in all cases. The outcome is that now both cases of `add_handler` do the same and only the channel types are different. In the following commit the two handler types are joining into a single one, which is now easier. 
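For illustration, a minimal self-contained analogue of the credential check
described above (the names `is_unix_socket` and `check_or_raise` are
placeholders, not the real signature):

```ocaml
(* Sketch: [check_or_raise] stands in for the session/RBAC checks, which
   either succeed or raise.  Previously the unix-socket branch returned ()
   without running [fn]; it now runs [fn] just like the checked branch. *)
let assert_credentials_ok ~is_unix_socket ~check_or_raise fn =
  if is_unix_socket then
    (* a unix-domain connection implies root on the box: skip the checks,
       but still run the callback *)
    fn ()
  else (
    check_or_raise () ;
    fn ()
  )
```

With that corrected, both branches of `add_handler` can share the same
wrapper, which is what the diff below does.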
Signed-off-by: Rob Hoes --- ocaml/xapi/xapi_http.ml | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/ocaml/xapi/xapi_http.ml b/ocaml/xapi/xapi_http.ml index 65de926376c..93a6e23525f 100644 --- a/ocaml/xapi/xapi_http.ml +++ b/ocaml/xapi/xapi_http.ml @@ -137,7 +137,7 @@ let assert_credentials_ok realm ?(http_action = realm) ?(fn = Rbac.nofn) ) in if Context.is_unix_socket ic then - () + fn () (* Connections from unix-domain socket implies you're root on the box, ergo everything is OK *) else match @@ -367,7 +367,7 @@ let add_handler (name, handler) = try if check_rbac then ( try - (* rbac checks *) + (* session and rbac checks *) assert_credentials_ok name req ~fn:(fun () -> callback req ic context) (Buf_io.fd_of ic) @@ -395,9 +395,18 @@ let add_handler (name, handler) = Debug.with_thread_associated ?client name (fun () -> try - if check_rbac then assert_credentials_ok name req ic ; - (* session and rbac checks *) - callback req ic context + if check_rbac then ( + try + (* session and rbac checks *) + assert_credentials_ok name req + ~fn:(fun () -> callback req ic context) + ic + with e -> + debug "Leaving RBAC-handler in xapi_http after: %s" + (ExnHelper.string_of_exn e) ; + raise e + ) else (* no rbac checks *) + callback req ic context with Api_errors.Server_error (name, params) as e -> error "Unhandled Api_errors.Server_error(%s, [ %s ])" name (String.concat "; " params) ; From ff9ce6d8e9be8c19a18d8a47b30c40cb11e45581 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 16:31:44 +0000 Subject: [PATCH 133/141] Remove BufIO HTTP handler type completely HTTP handlers of type BufIO did not actually read from through the buffer at all. Instead, they all assert that the buffer is empty and then simply use the file descriptor. All HTTP handlers now directly use file descriptors. 
The handler type simply becomes: type 'a handler = Http.Request.t -> Unix.file_descr -> 'a -> unit Signed-off-by: Rob Hoes --- ocaml/database/db_remote_cache_access_v1.ml | 5 +- ocaml/database/db_remote_cache_access_v1.mli | 2 +- ocaml/database/db_remote_cache_access_v2.ml | 5 +- ocaml/database/db_remote_cache_access_v2.mli | 2 +- ocaml/libs/http-lib/http_svr.ml | 28 ++--- ocaml/libs/http-lib/http_svr.mli | 6 +- ocaml/xapi-cli-server/xapi_cli.ml | 7 +- ocaml/xapi/api_server.ml | 14 +-- ocaml/xapi/audit_log.ml | 4 +- ocaml/xapi/fileserver.ml | 4 +- ocaml/xapi/storage_mux.ml | 5 +- ocaml/xapi/wlb_reports.ml | 13 +- ocaml/xapi/xapi.ml | 123 ++++++++----------- ocaml/xapi/xapi_http.ml | 89 ++++---------- ocaml/xapi/xapi_services.ml | 4 +- ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml | 21 ++-- 16 files changed, 122 insertions(+), 210 deletions(-) diff --git a/ocaml/database/db_remote_cache_access_v1.ml b/ocaml/database/db_remote_cache_access_v1.ml index d1d14cc3508..ec198755739 100644 --- a/ocaml/database/db_remote_cache_access_v1.ml +++ b/ocaml/database/db_remote_cache_access_v1.ml @@ -126,11 +126,10 @@ module DBCacheRemoteListener = struct raise e end -let handler req bio _ = - let fd = Buf_io.fd_of bio in +let handler req fd _ = (* fd only used for writing *) let body = - Http_svr.read_body ~limit:Db_globs.http_limit_max_rpc_size req bio + Http_svr.read_body ~limit:Db_globs.http_limit_max_rpc_size req fd in let body_xml = Xml.parse_string body in let reply_xml = DBCacheRemoteListener.process_xmlrpc body_xml in diff --git a/ocaml/database/db_remote_cache_access_v1.mli b/ocaml/database/db_remote_cache_access_v1.mli index 4f4e34db7ea..1ed77e081aa 100644 --- a/ocaml/database/db_remote_cache_access_v1.mli +++ b/ocaml/database/db_remote_cache_access_v1.mli @@ -1,2 +1,2 @@ -val handler : Http.Request.t -> Buf_io.t -> 'a -> unit +val handler : Http.Request.t -> Unix.file_descr -> 'a -> unit (** HTTP handler for v1 of the remote DB access protocol *) diff --git a/ocaml/database/db_remote_cache_access_v2.ml b/ocaml/database/db_remote_cache_access_v2.ml index 04de044064c..040ad215600 100644 --- a/ocaml/database/db_remote_cache_access_v2.ml +++ b/ocaml/database/db_remote_cache_access_v2.ml @@ -67,11 +67,10 @@ let process_rpc (req : Rpc.t) = Response.Too_many_values (x, y, z) ) -let handler req bio _ = - let fd = Buf_io.fd_of bio in +let handler req fd _ = (* fd only used for writing *) let body = - Http_svr.read_body ~limit:Db_globs.http_limit_max_rpc_size req bio + Http_svr.read_body ~limit:Db_globs.http_limit_max_rpc_size req fd in let request_rpc = Jsonrpc.of_string body in let reply_rpc = process_rpc request_rpc in diff --git a/ocaml/database/db_remote_cache_access_v2.mli b/ocaml/database/db_remote_cache_access_v2.mli index 57790e4d072..09fc7397af8 100644 --- a/ocaml/database/db_remote_cache_access_v2.mli +++ b/ocaml/database/db_remote_cache_access_v2.mli @@ -1,2 +1,2 @@ -val handler : Http.Request.t -> Buf_io.t -> 'a -> unit +val handler : Http.Request.t -> Unix.file_descr -> 'a -> unit (** HTTP handler for v2 of the remote DB access protocol *) diff --git a/ocaml/libs/http-lib/http_svr.ml b/ocaml/libs/http-lib/http_svr.ml index d75726f184c..3c8ec7facbb 100644 --- a/ocaml/libs/http-lib/http_svr.ml +++ b/ocaml/libs/http-lib/http_svr.ml @@ -67,9 +67,7 @@ module Stats = struct end (** Type of a function which can handle a Request.t *) -type 'a handler = - | BufIO of (Http.Request.t -> Buf_io.t -> 'a -> unit) - | FdIO of (Http.Request.t -> Unix.file_descr -> 'a -> unit) +type 'a handler = Http.Request.t -> 
Unix.file_descr -> 'a -> unit (* try and do f (unit -> unit), ignore exceptions *) let best_effort f = try f () with _ -> () @@ -270,19 +268,15 @@ let respond_to_options req s = (fun _ -> ()) (** If no handler matches the request then call this callback *) -let default_callback req bio _ = - response_forbidden (Buf_io.fd_of bio) ; +let default_callback req fd _ = + response_forbidden fd ; req.Request.close <- true module TE = struct type 'a t = {stats: Stats.t; stats_m: Mutex.t; handler: 'a handler} let empty () = - { - stats= Stats.empty () - ; stats_m= Mutex.create () - ; handler= BufIO default_callback - } + {stats= Stats.empty (); stats_m= Mutex.create (); handler= default_callback} end module MethodMap = Map.Make (struct @@ -499,7 +493,6 @@ let read_request ?proxy_seen ~read_timeout ~total_timeout ~max_length fd = let handle_one (x : 'a Server.t) ss context req = let@ req = Http.Request.with_tracing ~name:__FUNCTION__ req in let span = Http.Request.traceparent_of req in - let ic = Buf_io.of_fd ss in let finished = ref false in try D.debug "Request %s" (Http.Request.to_string req) ; @@ -513,14 +506,7 @@ let handle_one (x : 'a Server.t) ss context req = (Radix_tree.longest_prefix req.Request.uri method_map) in let@ _ = Tracing.with_child_trace span ~name:"handler" in - ( match te.TE.handler with - | BufIO handlerfn -> - handlerfn req ic context - | FdIO handlerfn -> - let fd = Buf_io.fd_of ic in - Buf_io.assert_buffer_empty ic ; - handlerfn req fd context - ) ; + te.TE.handler req ss context ; finished := req.Request.close ; Stats.update te.TE.stats te.TE.stats_m req ; !finished @@ -685,7 +671,7 @@ let stop (socket, _name) = exception Client_requested_size_over_limit (** Read the body of an HTTP request (requires a content-length: header). *) -let read_body ?limit req bio = +let read_body ?limit req fd = match req.Request.content_length with | None -> failwith "We require a content-length: HTTP header" @@ -696,7 +682,7 @@ let read_body ?limit req bio = if length > l then raise Client_requested_size_over_limit ) limit ; - Unixext.really_read_string (Buf_io.fd_of bio) length + Unixext.really_read_string fd length (* Helpers to determine the client of a call *) diff --git a/ocaml/libs/http-lib/http_svr.mli b/ocaml/libs/http-lib/http_svr.mli index 52f6719dd0a..101479d100d 100644 --- a/ocaml/libs/http-lib/http_svr.mli +++ b/ocaml/libs/http-lib/http_svr.mli @@ -18,9 +18,7 @@ type uri_path = string (** A handler is a function which takes a request and produces a response *) -type 'a handler = - | BufIO of (Http.Request.t -> Buf_io.t -> 'a -> unit) - | FdIO of (Http.Request.t -> Unix.file_descr -> 'a -> unit) +type 'a handler = Http.Request.t -> Unix.file_descr -> 'a -> unit module Stats : sig (** Statistics recorded per-handler *) @@ -120,7 +118,7 @@ val respond_to_options : Http.Request.t -> Unix.file_descr -> unit val headers : Unix.file_descr -> string list -> unit -val read_body : ?limit:int -> Http.Request.t -> Buf_io.t -> string +val read_body : ?limit:int -> Http.Request.t -> Unix.file_descr -> string (* Helpers to determine the client of a call *) diff --git a/ocaml/xapi-cli-server/xapi_cli.ml b/ocaml/xapi-cli-server/xapi_cli.ml index 59c033efb74..bc2389d4c44 100644 --- a/ocaml/xapi-cli-server/xapi_cli.ml +++ b/ocaml/xapi-cli-server/xapi_cli.ml @@ -346,11 +346,8 @@ let exception_handler s e = [Cli_util.string_of_exn exc] s -let handler (req : Http.Request.t) (bio : Buf_io.t) _ = - let str = - Http_svr.read_body ~limit:Constants.http_limit_max_cli_size req bio - in - let s = 
Buf_io.fd_of bio in +let handler (req : Http.Request.t) (s : Unix.file_descr) _ = + let str = Http_svr.read_body ~limit:Constants.http_limit_max_cli_size req s in (* Tell the client the server version *) marshal_protocol s ; (* Read the client's protocol version *) diff --git a/ocaml/xapi/api_server.ml b/ocaml/xapi/api_server.ml index 38f39e9b50f..35cb14103e3 100644 --- a/ocaml/xapi/api_server.ml +++ b/ocaml/xapi/api_server.ml @@ -90,13 +90,12 @@ let create_thumbprint_header req response = ) (** HTML callback that dispatches an RPC and returns the response. *) -let callback is_json req bio _ = +let callback is_json req fd _ = let@ req = Http.Request.with_tracing ~name:__FUNCTION__ req in let span = Http.Request.traceparent_of req in - let fd = Buf_io.fd_of bio in (* fd only used for writing *) let body = - Http_svr.read_body ~limit:Constants.http_limit_max_rpc_size req bio + Http_svr.read_body ~limit:Constants.http_limit_max_rpc_size req fd in try let rpc = @@ -145,13 +144,12 @@ let callback is_json req bio _ = Backtrace.is_important e ; raise e (** HTML callback that dispatches an RPC and returns the response. *) -let jsoncallback req bio _ = +let jsoncallback req fd _ = let@ req = Http.Request.with_tracing ~name:__FUNCTION__ req in - let fd = Buf_io.fd_of bio in (* fd only used for writing *) let body = Http_svr.read_body ~limit:Xapi_database.Db_globs.http_limit_max_rpc_size req - bio + fd in try let json_rpc_version, id, rpc = @@ -182,6 +180,4 @@ let jsoncallback req bio _ = ) ) -let options_callback req bio _ = - let fd = Buf_io.fd_of bio in - Http_svr.respond_to_options req fd +let options_callback req fd _ = Http_svr.respond_to_options req fd diff --git a/ocaml/xapi/audit_log.ml b/ocaml/xapi/audit_log.ml index d4e9ab14d65..0563b2c4fe3 100644 --- a/ocaml/xapi/audit_log.ml +++ b/ocaml/xapi/audit_log.ml @@ -122,9 +122,7 @@ let log_timestamp_of_iso8601 iso8601_timestamp = eg. /audit_log?...&since=2009-09-10T11:31 eg. 
/audit_log?...&since=2009-09-10 *) -let handler (req : Request.t) (bio : Buf_io.t) _ = - let s = Buf_io.fd_of bio in - Buf_io.assert_buffer_empty bio ; +let handler (req : Request.t) (s : Unix.file_descr) _ = req.Request.close <- true ; Xapi_http.with_context (* makes sure to signal task-completed to cli *) (Printf.sprintf "audit_log_get request") req s (fun __context -> diff --git a/ocaml/xapi/fileserver.ml b/ocaml/xapi/fileserver.ml index ed9ed334d66..4931d419918 100644 --- a/ocaml/xapi/fileserver.ml +++ b/ocaml/xapi/fileserver.ml @@ -55,10 +55,8 @@ let access_forbidden req s = !Xapi_globs.website_https_only && is_external_http req s let send_file (uri_base : string) (dir : string) (req : Request.t) - (bio : Buf_io.t) _ = + (s : Unix.file_descr) _ = let uri_base_len = String.length uri_base in - let s = Buf_io.fd_of bio in - Buf_io.assert_buffer_empty bio ; let is_external_http = is_external_http req s in if is_external_http && !Xapi_globs.website_https_only then Http_svr.response_forbidden ~req s diff --git a/ocaml/xapi/storage_mux.ml b/ocaml/xapi/storage_mux.ml index b14476a3d9d..a2cfc468f5f 100644 --- a/ocaml/xapi/storage_mux.ml +++ b/ocaml/xapi/storage_mux.ml @@ -880,9 +880,8 @@ module Local_domain_socket = struct let path = Filename.concat "/var/lib/xcp" "storage" (* receives external requests on Constants.sm_uri *) - let xmlrpc_handler process req bio _ = - let body = Http_svr.read_body req bio in - let s = Buf_io.fd_of bio in + let xmlrpc_handler process req s _ = + let body = Http_svr.read_body req s in let rpc = Xmlrpc.call_of_string body in (* Printf.fprintf stderr "Request: %s %s\n%!" rpc.Rpc.name (Rpc.to_string (List.hd rpc.Rpc.params)); *) let result = process rpc in diff --git a/ocaml/xapi/wlb_reports.ml b/ocaml/xapi/wlb_reports.ml index 07b71252e61..baad7f6b35b 100644 --- a/ocaml/xapi/wlb_reports.ml +++ b/ocaml/xapi/wlb_reports.ml @@ -144,9 +144,8 @@ let trim_and_send method_name tag recv_sock send_sock = Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." s -let handle req bio _method_name tag (method_name, request_func) = - let client_sock = Buf_io.fd_of bio in - Buf_io.assert_buffer_empty bio ; +let handle req fd _method_name tag (method_name, request_func) = + let client_sock = fd in debug "handle: fd = %d" (Xapi_stdext_unix.Unixext.int_of_file_descr client_sock) ; req.Request.close <- true ; @@ -171,7 +170,7 @@ let handle req bio _method_name tag (method_name, request_func) = (* GET /wlb_report?session_id=&task_id=& report=&=&... 
*) -let report_handler (req : Request.t) (bio : Buf_io.t) _ = +let report_handler (req : Request.t) (fd : Unix.file_descr) _ = if not (List.mem_assoc "report" req.Request.query) then ( error "Request for WLB report lacked 'report' parameter" ; failwith "Bad request" @@ -182,10 +181,10 @@ let report_handler (req : Request.t) (bio : Buf_io.t) _ = (fun (k, _) -> not (List.mem k ["session_id"; "task_id"; "report"])) req.Request.query in - handle req bio "ExecuteReport" report_tag + handle req fd "ExecuteReport" report_tag (Workload_balancing.wlb_report_request report params) (* GET /wlb_diagnostics?session_id=&task_id= *) -let diagnostics_handler (req : Request.t) (bio : Buf_io.t) _ = - handle req bio "GetDiagnostics" diagnostics_tag +let diagnostics_handler (req : Request.t) (fd : Unix.file_descr) _ = + handle req fd "GetDiagnostics" diagnostics_tag Workload_balancing.wlb_diagnostics_request diff --git a/ocaml/xapi/xapi.ml b/ocaml/xapi/xapi.ml index b702001ef2e..ed6323663e3 100644 --- a/ocaml/xapi/xapi.ml +++ b/ocaml/xapi/xapi.ml @@ -786,86 +786,69 @@ let startup_script () = let master_only_http_handlers = [ (* CA-26044: don't let people DoS random slaves *) - ("post_remote_db_access", Http_svr.BufIO remote_database_access_handler) - ; ( "post_remote_db_access_v2" - , Http_svr.BufIO remote_database_access_handler_v2 - ) - ; ("get_repository", Http_svr.FdIO Repository.get_repository_handler) - ; ("get_updates", Http_svr.FdIO Xapi_pool.get_updates_handler) + ("post_remote_db_access", remote_database_access_handler) + ; ("post_remote_db_access_v2", remote_database_access_handler_v2) + ; ("get_repository", Repository.get_repository_handler) + ; ("get_updates", Xapi_pool.get_updates_handler) ] let common_http_handlers () = let handlers = [ - ("get_services_xenops", Http_svr.FdIO Xapi_services.get_handler) - ; ("put_services_xenops", Http_svr.FdIO Xapi_services.put_handler) - ; ("post_services_xenops", Http_svr.FdIO Xapi_services.post_handler) - ; ("get_services_sm", Http_svr.FdIO Xapi_services.get_handler) - ; ("put_services_sm", Http_svr.FdIO Xapi_services.put_handler) - ; ("post_services_sm", Http_svr.FdIO Xapi_services.post_handler) - ; ("get_services", Http_svr.FdIO Xapi_services.get_handler) - ; ("post_services", Http_svr.FdIO Xapi_services.post_handler) - ; ("put_services", Http_svr.FdIO Xapi_services.put_handler) - ; ("put_import", Http_svr.FdIO Import.handler) - ; ("put_import_metadata", Http_svr.FdIO Import.metadata_handler) - ; ("put_import_raw_vdi", Http_svr.FdIO Import_raw_vdi.handler) - ; ("get_export", Http_svr.FdIO Export.handler) - ; ("get_export_metadata", Http_svr.FdIO Export.metadata_handler) - ; ("get_export_raw_vdi", Http_svr.FdIO Export_raw_vdi.handler) - ; ("connect_console", Http_svr.FdIO (Console.handler Console.real_proxy)) - ; ("connect_console_ws", Http_svr.FdIO (Console.handler Console.ws_proxy)) - ; ("post_cli", Http_svr.BufIO Xapi_cli.handler) - ; ("get_host_backup", Http_svr.FdIO Xapi_host_backup.host_backup_handler) - ; ("put_host_restore", Http_svr.FdIO Xapi_host_backup.host_restore_handler) - ; ( "get_host_logs_download" - , Http_svr.FdIO Xapi_logs_download.logs_download_handler - ) - ; ( "put_pool_patch_upload" - , Http_svr.FdIO Xapi_pool_patch.pool_patch_upload_handler - ) - ; ("get_vncsnapshot", Http_svr.FdIO Xapi_vncsnapshot.vncsnapshot_handler) - ; ( "get_pool_xml_db_sync" - , Http_svr.FdIO Pool_db_backup.pull_database_backup_handler - ) - ; ( "put_pool_xml_db_sync" - , Http_svr.FdIO Pool_db_backup.push_database_restore_handler - ) - ; ( "get_config_sync" - 
, Http_svr.FdIO Config_file_sync.config_file_sync_handler - ) - ; ("get_system_status", Http_svr.FdIO System_status.handler) - ; (Constants.get_vm_rrd, Http_svr.FdIO Rrdd_proxy.get_vm_rrd_forwarder) - ; (Constants.get_host_rrd, Http_svr.FdIO Rrdd_proxy.get_host_rrd_forwarder) - ; (Constants.get_sr_rrd, Http_svr.FdIO Rrdd_proxy.get_sr_rrd_forwarder) - ; ( Constants.get_rrd_updates - , Http_svr.FdIO Rrdd_proxy.get_rrd_updates_forwarder - ) - ; (Constants.put_rrd, Http_svr.FdIO Rrdd_proxy.put_rrd_forwarder) - ; ("get_blob", Http_svr.FdIO Xapi_blob.handler) - ; ("put_blob", Http_svr.FdIO Xapi_blob.handler) - ; ("put_messages", Http_svr.FdIO Xapi_message.handler) - ; ("connect_remotecmd", Http_svr.FdIO Xapi_remotecmd.handler) - ; ("get_wlb_report", Http_svr.BufIO Wlb_reports.report_handler) - ; ("get_wlb_diagnostics", Http_svr.BufIO Wlb_reports.diagnostics_handler) - ; ("get_audit_log", Http_svr.BufIO Audit_log.handler) - ; ("post_root", Http_svr.BufIO (Api_server.callback false)) - ; ("post_json", Http_svr.BufIO (Api_server.callback true)) - ; ("post_jsonrpc", Http_svr.BufIO Api_server.jsoncallback) - ; ("post_root_options", Http_svr.BufIO Api_server.options_callback) - ; ("post_json_options", Http_svr.BufIO Api_server.options_callback) - ; ("post_jsonrpc_options", Http_svr.BufIO Api_server.options_callback) - ; ( "get_pool_update_download" - , Http_svr.FdIO Xapi_pool_update.pool_update_download_handler - ) - ; ("get_host_updates", Http_svr.FdIO Xapi_host.get_host_updates_handler) - ; ("put_bundle", Http_svr.FdIO Xapi_pool.put_bundle_handler) + ("get_services_xenops", Xapi_services.get_handler) + ; ("put_services_xenops", Xapi_services.put_handler) + ; ("post_services_xenops", Xapi_services.post_handler) + ; ("get_services_sm", Xapi_services.get_handler) + ; ("put_services_sm", Xapi_services.put_handler) + ; ("post_services_sm", Xapi_services.post_handler) + ; ("get_services", Xapi_services.get_handler) + ; ("post_services", Xapi_services.post_handler) + ; ("put_services", Xapi_services.put_handler) + ; ("put_import", Import.handler) + ; ("put_import_metadata", Import.metadata_handler) + ; ("put_import_raw_vdi", Import_raw_vdi.handler) + ; ("get_export", Export.handler) + ; ("get_export_metadata", Export.metadata_handler) + ; ("get_export_raw_vdi", Export_raw_vdi.handler) + ; ("connect_console", Console.handler Console.real_proxy) + ; ("connect_console_ws", Console.handler Console.ws_proxy) + ; ("post_cli", Xapi_cli.handler) + ; ("get_host_backup", Xapi_host_backup.host_backup_handler) + ; ("put_host_restore", Xapi_host_backup.host_restore_handler) + ; ("get_host_logs_download", Xapi_logs_download.logs_download_handler) + ; ("put_pool_patch_upload", Xapi_pool_patch.pool_patch_upload_handler) + ; ("get_vncsnapshot", Xapi_vncsnapshot.vncsnapshot_handler) + ; ("get_pool_xml_db_sync", Pool_db_backup.pull_database_backup_handler) + ; ("put_pool_xml_db_sync", Pool_db_backup.push_database_restore_handler) + ; ("get_config_sync", Config_file_sync.config_file_sync_handler) + ; ("get_system_status", System_status.handler) + ; (Constants.get_vm_rrd, Rrdd_proxy.get_vm_rrd_forwarder) + ; (Constants.get_host_rrd, Rrdd_proxy.get_host_rrd_forwarder) + ; (Constants.get_sr_rrd, Rrdd_proxy.get_sr_rrd_forwarder) + ; (Constants.get_rrd_updates, Rrdd_proxy.get_rrd_updates_forwarder) + ; (Constants.put_rrd, Rrdd_proxy.put_rrd_forwarder) + ; ("get_blob", Xapi_blob.handler) + ; ("put_blob", Xapi_blob.handler) + ; ("put_messages", Xapi_message.handler) + ; ("connect_remotecmd", Xapi_remotecmd.handler) + ; 
("get_wlb_report", Wlb_reports.report_handler) + ; ("get_wlb_diagnostics", Wlb_reports.diagnostics_handler) + ; ("get_audit_log", Audit_log.handler) + ; ("post_root", Api_server.callback false) + ; ("post_json", Api_server.callback true) + ; ("post_jsonrpc", Api_server.jsoncallback) + ; ("post_root_options", Api_server.options_callback) + ; ("post_json_options", Api_server.options_callback) + ; ("post_jsonrpc_options", Api_server.options_callback) + ; ("get_pool_update_download", Xapi_pool_update.pool_update_download_handler) + ; ("get_host_updates", Xapi_host.get_host_updates_handler) + ; ("put_bundle", Xapi_pool.put_bundle_handler) ] in if !Xapi_globs.disable_webserver then handlers else - ("get_root", Http_svr.BufIO (Fileserver.send_file "/" !Xapi_globs.web_dir)) - :: handlers + ("get_root", Fileserver.send_file "/" !Xapi_globs.web_dir) :: handlers let listen_unix_socket sock_path = (* Always listen on the Unix domain socket first *) diff --git a/ocaml/xapi/xapi_http.ml b/ocaml/xapi/xapi_http.ml index 93a6e23525f..964983d8eda 100644 --- a/ocaml/xapi/xapi_http.ml +++ b/ocaml/xapi/xapi_http.ml @@ -351,70 +351,33 @@ let add_handler (name, handler) = failwith (Printf.sprintf "Unregistered HTTP handler: %s" name) in let check_rbac = Rbac.is_rbac_enabled_for_http_action name in - let h = - match handler with - | Http_svr.BufIO callback -> - Http_svr.BufIO - (fun req ic context -> - let client = - Http_svr.( - client_of_req_and_fd req (Buf_io.fd_of ic) - |> Option.map string_of_client - ) - in - Debug.with_thread_associated ?client name - (fun () -> - try - if check_rbac then ( - try - (* session and rbac checks *) - assert_credentials_ok name req - ~fn:(fun () -> callback req ic context) - (Buf_io.fd_of ic) - with e -> - debug "Leaving RBAC-handler in xapi_http after: %s" - (ExnHelper.string_of_exn e) ; - raise e - ) else (* no rbac checks *) - callback req ic context - with Api_errors.Server_error (name, params) as e -> - error "Unhandled Api_errors.Server_error(%s, [ %s ])" name - (String.concat "; " params) ; - raise (Http_svr.Generic_error (ExnHelper.string_of_exn e)) - ) - () - ) - | Http_svr.FdIO callback -> - Http_svr.FdIO - (fun req ic context -> - let client = - Http_svr.( - client_of_req_and_fd req ic |> Option.map string_of_client - ) - in - Debug.with_thread_associated ?client name - (fun () -> - try - if check_rbac then ( - try - (* session and rbac checks *) - assert_credentials_ok name req - ~fn:(fun () -> callback req ic context) - ic - with e -> - debug "Leaving RBAC-handler in xapi_http after: %s" - (ExnHelper.string_of_exn e) ; - raise e - ) else (* no rbac checks *) - callback req ic context - with Api_errors.Server_error (name, params) as e -> - error "Unhandled Api_errors.Server_error(%s, [ %s ])" name - (String.concat "; " params) ; - raise (Http_svr.Generic_error (ExnHelper.string_of_exn e)) - ) - () - ) + let h req ic context = + let client = + Http_svr.(client_of_req_and_fd req ic |> Option.map string_of_client) + in + Debug.with_thread_associated ?client name + (fun () -> + try + if check_rbac then ( + try + (* session and rbac checks *) + assert_credentials_ok name req + ~fn:(fun () -> handler req ic context) + ic + with e -> + debug "Leaving RBAC-handler in xapi_http after: %s" + (ExnHelper.string_of_exn e) ; + raise e + ) else (* no rbac checks *) + handler req ic context + with Api_errors.Server_error (name, params) as e -> + error "Unhandled Api_errors.Server_error(%s, [ %s ])" name + (String.concat "; " params) ; + raise (Http_svr.Generic_error 
(ExnHelper.string_of_exn e)) + ) + () in + match action with | meth, uri, _sdk, _sdkargs, _roles, _sub_actions -> let ty = diff --git a/ocaml/xapi/xapi_services.ml b/ocaml/xapi/xapi_services.ml index b88638739bc..8a71c2aca0c 100644 --- a/ocaml/xapi/xapi_services.ml +++ b/ocaml/xapi/xapi_services.ml @@ -156,7 +156,7 @@ let post_handler (req : Http.Request.t) s _ = match String.split_on_char '/' req.Http.Request.uri with | "" :: services :: "xenops" :: _ when services = _services -> (* over the network we still use XMLRPC *) - let request = Http_svr.read_body req (Buf_io.of_fd s) in + let request = Http_svr.read_body req s in let response = if !Xcp_client.use_switch then let req = Xmlrpc.call_of_string request in @@ -178,7 +178,7 @@ let post_handler (req : Http.Request.t) s _ = http_proxy_to_plugin req s name | [""; services; "SM"] when services = _services -> Storage_mux.Local_domain_socket.xmlrpc_handler - Storage_mux.Server.process req (Buf_io.of_fd s) () + Storage_mux.Server.process req s () | _ -> Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()) ; req.Http.Request.close <- true diff --git a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml index 5b20dc77393..48da4c60ae7 100644 --- a/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml +++ b/ocaml/xcp-rrdd/bin/rrdd/xcp_rrdd.ml @@ -34,9 +34,8 @@ open D open Xapi_stdext_pervasives.Pervasiveext (* A helper method for processing XMLRPC requests. *) -let xmlrpc_handler process req bio context = - let body = Http_svr.read_body req bio in - let s = Buf_io.fd_of bio in +let xmlrpc_handler process req s context = + let body = Http_svr.read_body req s in let rpc = Xmlrpc.call_of_string body in try let result = process context rpc in @@ -75,21 +74,19 @@ let accept_forever sock f = let start (xmlrpc_path, http_fwd_path) process = let server = Http_svr.Server.empty () in let open Rrdd_http_handler in - Http_svr.Server.add_handler server Http.Post "/" - (Http_svr.BufIO (xmlrpc_handler process)) ; + Http_svr.Server.add_handler server Http.Post "/" (xmlrpc_handler process) ; Http_svr.Server.add_handler server Http.Get Rrdd_libs.Constants.get_vm_rrd_uri - (Http_svr.FdIO get_vm_rrd_handler) ; + get_vm_rrd_handler ; Http_svr.Server.add_handler server Http.Get - Rrdd_libs.Constants.get_host_rrd_uri (Http_svr.FdIO get_host_rrd_handler) ; + Rrdd_libs.Constants.get_host_rrd_uri get_host_rrd_handler ; Http_svr.Server.add_handler server Http.Get Rrdd_libs.Constants.get_sr_rrd_uri - (Http_svr.FdIO get_sr_rrd_handler) ; + get_sr_rrd_handler ; Http_svr.Server.add_handler server Http.Get - Rrdd_libs.Constants.get_rrd_updates_uri - (Http_svr.FdIO get_rrd_updates_handler) ; + Rrdd_libs.Constants.get_rrd_updates_uri get_rrd_updates_handler ; Http_svr.Server.add_handler server Http.Put Rrdd_libs.Constants.put_rrd_uri - (Http_svr.FdIO put_rrd_handler) ; + put_rrd_handler ; Http_svr.Server.add_handler server Http.Post - Rrdd_libs.Constants.rrd_unarchive_uri (Http_svr.FdIO unarchive_rrd_handler) ; + Rrdd_libs.Constants.rrd_unarchive_uri unarchive_rrd_handler ; Xapi_stdext_unix.Unixext.mkdir_safe (Filename.dirname xmlrpc_path) 0o700 ; Xapi_stdext_unix.Unixext.unlink_safe xmlrpc_path ; let xmlrpc_socket = Http_svr.bind (Unix.ADDR_UNIX xmlrpc_path) "unix_rpc" in From 8e0245531f7ef2cb50d3d6fc464e402eed0ba472 Mon Sep 17 00:00:00 2001 From: Rob Hoes Date: Thu, 17 Oct 2024 16:46:08 +0000 Subject: [PATCH 134/141] Remove now-unused Buf_io and associated tests Signed-off-by: Rob Hoes --- ocaml/database/database_server_main.ml | 4 +- ocaml/libs/http-lib/buf_io.ml | 
113 ----------------------- ocaml/libs/http-lib/buf_io.mli | 50 ---------- ocaml/libs/http-lib/bufio_test.ml | 106 ---------------------- ocaml/libs/http-lib/bufio_test.mli | 1 - ocaml/libs/http-lib/bufio_test_run.ml | 1 - ocaml/libs/http-lib/bufio_test_run.mli | 0 ocaml/libs/http-lib/dune | 38 +------- ocaml/libs/http-lib/test_server.ml | 121 +++++++++++-------------- ocaml/quicktest/dune | 1 - ocaml/quicktest/quicktest.ml | 6 +- 11 files changed, 59 insertions(+), 382 deletions(-) delete mode 100644 ocaml/libs/http-lib/buf_io.ml delete mode 100644 ocaml/libs/http-lib/buf_io.mli delete mode 100644 ocaml/libs/http-lib/bufio_test.ml delete mode 100644 ocaml/libs/http-lib/bufio_test.mli delete mode 100644 ocaml/libs/http-lib/bufio_test_run.ml delete mode 100644 ocaml/libs/http-lib/bufio_test_run.mli diff --git a/ocaml/database/database_server_main.ml b/ocaml/database/database_server_main.ml index 1dc59284263..e75539a5592 100644 --- a/ocaml/database/database_server_main.ml +++ b/ocaml/database/database_server_main.ml @@ -80,9 +80,9 @@ let _ = let socket = Http_svr.bind sockaddr "unix_rpc" in let server = Http_svr.Server.empty () in Http_svr.Server.add_handler server Http.Post "/post_remote_db_access" - (Http_svr.BufIO remote_database_access_handler_v1) ; + remote_database_access_handler_v1 ; Http_svr.Server.add_handler server Http.Post "/post_remote_db_access_v2" - (Http_svr.BufIO remote_database_access_handler_v2) ; + remote_database_access_handler_v2 ; Http_svr.start ~conn_limit:1024 server socket ; Printf.printf "server listening\n%!" ; if !self_test then ( diff --git a/ocaml/libs/http-lib/buf_io.ml b/ocaml/libs/http-lib/buf_io.ml deleted file mode 100644 index 12da51cb22f..00000000000 --- a/ocaml/libs/http-lib/buf_io.ml +++ /dev/null @@ -1,113 +0,0 @@ -(* - * Copyright (C) 2006-2009 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) -(* Buffered IO with timeouts *) - -type t = {fd: Unix.file_descr; buf: bytes; mutable cur: int; mutable max: int} - -exception Timeout (* Waited too long for data to appear *) - -exception Eof - -let infinite_timeout = -1. - -let of_fd fd = - (* Unix.set_nonblock fd;*) - { - fd - ; (* FIXME -- this should be larger. Low for testing *) - buf= Bytes.create 1024 - ; cur= 0 - ; max= 0 - } - -let fd_of t = t.fd - -(* Internal functions *) - -let is_buffer_empty ic = ic.max - ic.cur <= 0 - -(* Used as a temporary measure while converting from unbuffered to buffered - I/O in the rest of the software. 
*) -let assert_buffer_empty ic = - if not (is_buffer_empty ic) then failwith "Buf_io buffer not empty" - -(* Shift the unprocessed data to the beginning of the buffer *) -let shift ic = - if ic.cur = Bytes.length ic.buf (* No unprocessed data!*) then ( - ic.cur <- 0 ; - ic.max <- 0 - ) else ( - Bytes.blit ic.buf ic.cur ic.buf 0 (ic.max - ic.cur) ; - ic.max <- ic.max - ic.cur ; - ic.cur <- 0 - ) - -(* Fill the buffer with everything that's ready to be read (up to the limit of the buffer *) -let fill_buf ~buffered ic timeout = - let buf_size = Bytes.length ic.buf in - let fill_no_exc timeout len = - Xapi_stdext_unix.Unixext.with_socket_timeout ic.fd timeout @@ fun () -> - try - let n = Unix.read ic.fd ic.buf ic.max len in - ic.max <- n + ic.max ; - if n = 0 && len <> 0 then raise Eof ; - n - with Unix.Unix_error (Unix.(EAGAIN | EWOULDBLOCK), _, _) -> -1 - in - (* If there's no space to read, shift *) - if ic.max = buf_size then shift ic ; - let space_left = buf_size - ic.max in - (* Read byte one by one just do make sure we don't buffer too many chars *) - let n = - fill_no_exc (Some timeout) - (if buffered then space_left else min space_left 1) - in - (* Select returned nothing to read *) - if n = -1 then raise Timeout ; - if n = space_left then ( - shift ic ; - let tofillsz = - if buffered then buf_size - ic.max else min (buf_size - ic.max) 1 - in - (* cannot use 0 here, for select that'd mean timeout immediately, for - setsockopt it would mean no timeout. - So use a very short timeout instead - *) - ignore (fill_no_exc (Some 1e-6) tofillsz) - ) - -(** Input 'len' characters from ic and put them into the bytestring 'b' starting from 'from' *) -let rec really_input ?(timeout = 15.0) ic b from len = - if len = 0 then - () - else ( - if ic.max - ic.cur < len then fill_buf ~buffered:true ic timeout ; - let blitlen = if ic.max - ic.cur < len then ic.max - ic.cur else len in - Bytes.blit ic.buf ic.cur b from blitlen ; - ic.cur <- ic.cur + blitlen ; - really_input ~timeout ic b (from + blitlen) (len - blitlen) - ) - -let really_input_buf ?timeout ic len = - let blksize = 2048 in - let buf = Buffer.create blksize in - let s = Bytes.create blksize in - let left = ref len in - while !left > 0 do - let size = min blksize !left in - really_input ?timeout ic s 0 size ; - Buffer.add_subbytes buf s 0 size ; - left := !left - size - done ; - Buffer.contents buf diff --git a/ocaml/libs/http-lib/buf_io.mli b/ocaml/libs/http-lib/buf_io.mli deleted file mode 100644 index fc76f1932e2..00000000000 --- a/ocaml/libs/http-lib/buf_io.mli +++ /dev/null @@ -1,50 +0,0 @@ -(* - * Copyright (C) 2006-2009 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- *) -(** Buffered IO with timeouts *) - -(** {2 Abstract type of inputs} *) -type t - -val of_fd : Unix.file_descr -> t - -val fd_of : t -> Unix.file_descr - -val infinite_timeout : float - -(** {2 Input functions} *) - -val really_input : ?timeout:float -> t -> bytes -> int -> int -> unit -(** Input 'len' characters from ic and put them into the string 'str' starting from 'from' *) - -val really_input_buf : ?timeout:float -> t -> int -> string - -(** {2 Exceptions} *) - -(** Waited too long for data to appear *) -exception Timeout - -exception Eof - -(** {2 Internal functions} *) - -val is_buffer_empty : t -> bool - -val assert_buffer_empty : t -> unit - -(* val assert_buffer_empty : t -> unit - val shift : t -> unit - val got_line : t -> int - val is_full : t -> bool - val fill_buf : buffered:bool -> t -> float -> unit -*) diff --git a/ocaml/libs/http-lib/bufio_test.ml b/ocaml/libs/http-lib/bufio_test.ml deleted file mode 100644 index 81aac2ad879..00000000000 --- a/ocaml/libs/http-lib/bufio_test.ml +++ /dev/null @@ -1,106 +0,0 @@ -open QCheck2 -open Xapi_fd_test - -let print_timeout = string_of_float - -let expect_string ~expected ~actual = - if not (String.equal expected actual) then - Test.fail_reportf "Data sent and observed do not match: %S <> %S" expected - actual - -let expect_amount ~expected observation = - let open Observations in - let actual = String.length observation.data in - if expected <> actual then - Test.fail_reportf - "Amount of data available and transferred does not match: %d <> %d;@,%a" - expected actual pp observation - -let test_buf_io = - let timeouts = Generate.timeouts in - let gen = Gen.tup2 Generate.t timeouts - and print = Print.tup2 Generate.print print_timeout in - Test.make ~name:__FUNCTION__ ~print gen @@ fun (behaviour, timeout) -> - let every_bytes = - Int.min - (Option.map Observations.Delay.every_bytes behaviour.delay_read - |> Option.value ~default:Int.max_int - ) - (Option.map Observations.Delay.every_bytes behaviour.delay_write - |> Option.value ~default:Int.max_int - ) - in - let operations = Int.max 1 (behaviour.size / every_bytes) in - (* Buf_io uses per-operation timeouts, not a timeout for the whole function, - so if we want a timeout of 0.1s and we insert some delays every 1 byte, - for 64KiB bytes in total, then we need 0.1/65536 timeout for individual operations. - - timeout_span remains the span for the entire function, - and timeout the per operation timeout that we'll pass to the function under test. - *) - let timeout_span = Mtime.Span.of_float_ns (timeout *. 1e9) |> Option.get in - let timeout = timeout /. float operations in - let timeout_operation_span = - Mtime.Span.of_float_ns (timeout *. 1e9) |> Option.get - in - (* timeout < 1us would get truncated to 0 *) - QCheck2.assume (timeout > 1e-6) ; - (* Format.eprintf "Testing %s@." (print (behaviour, timeout)); *) - if behaviour.kind <> Unix.S_SOCK then - QCheck2.assume_fail () ; - (* we only support sockets for this function *) - let test_elapsed = ref Mtime.Span.zero in - let test wrapped_fd = - let fd = Xapi_fdcaps.Operations.For_test.unsafe_fd_exn wrapped_fd in - let bio = Buf_io.of_fd fd in - let dt = Mtime_clock.counter () in - let finally () = test_elapsed := Mtime_clock.count dt in - Fun.protect ~finally (fun () -> - Buf_io.really_input_buf bio behaviour.size ~timeout - ) - in - (*Printf.eprintf "testing: %s\n%!" 
(print (behaviour, timeout)) ;*) - let observations, result = - let buf = String.init behaviour.size (fun i -> Char.chr (i mod 255)) in - Generate.run_ro behaviour buf ~f:test - in - let () = - let open Observations in - let elapsed = !test_elapsed in - let timeout_extra = - Mtime.Span.(add (timeout_span :> Mtime.Span.t) @@ (500 * ms)) - in - if Mtime.Span.compare elapsed timeout_extra > 0 then - Test.fail_reportf - "Function duration significantly exceeds timeout: %a > %.6f; %s" - Mtime.Span.pp elapsed timeout - (Fmt.to_to_string Fmt.(option pp) observations.Observations.write) ; - (* Format.eprintf "Result: %a@." (Fmt.option Observations.pp) observations.write;*) - match (observations, result) with - | {write= Some write; _}, Ok actual -> - expect_amount ~expected:(String.length actual) write ; - expect_string ~expected:write.data ~actual - | {write= Some _; _}, Error (`Exn_trap (Buf_io.Timeout, _)) -> - let elapsed = !test_elapsed in - if Mtime.Span.compare elapsed timeout_operation_span < 0 then - Test.fail_reportf "Timed out earlier than requested: %a < %a" - Mtime.Span.pp elapsed Mtime.Span.pp timeout_span - | ( {write= Some write; _} - , Error (`Exn_trap (Unix.Unix_error (Unix.EPIPE, _, _), _)) ) -> - if String.length write.data = behaviour.size then - Test.fail_reportf - "Transferred exact amount, shouldn't have tried to send more: %d" - behaviour.size - | {write= None; _}, _ -> - () - | _, Error (`Exn_trap (e, bt)) -> - Printexc.raise_with_backtrace e bt - in - true - -let tests = [test_buf_io] - -let () = - (* avoid SIGPIPE *) - let (_ : Sys.signal_behavior) = Sys.signal Sys.sigpipe Sys.Signal_ignore in - () diff --git a/ocaml/libs/http-lib/bufio_test.mli b/ocaml/libs/http-lib/bufio_test.mli deleted file mode 100644 index a10acd45016..00000000000 --- a/ocaml/libs/http-lib/bufio_test.mli +++ /dev/null @@ -1 +0,0 @@ -val tests : QCheck2.Test.t list diff --git a/ocaml/libs/http-lib/bufio_test_run.ml b/ocaml/libs/http-lib/bufio_test_run.ml deleted file mode 100644 index a7a1cacab7e..00000000000 --- a/ocaml/libs/http-lib/bufio_test_run.ml +++ /dev/null @@ -1 +0,0 @@ -let () = QCheck_base_runner.run_tests_main Bufio_test.tests diff --git a/ocaml/libs/http-lib/bufio_test_run.mli b/ocaml/libs/http-lib/bufio_test_run.mli deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/ocaml/libs/http-lib/dune b/ocaml/libs/http-lib/dune index cc5bec51648..2990fda2453 100644 --- a/ocaml/libs/http-lib/dune +++ b/ocaml/libs/http-lib/dune @@ -3,7 +3,7 @@ (public_name http-lib) (modes best) (wrapped false) - (modules (:standard \ http_svr http_proxy server_io http_test radix_tree_test test_client test_server bufio_test bufio_test_run)) + (modules (:standard \ http_svr http_proxy server_io http_test radix_tree_test test_client test_server)) (preprocess (per_module ((pps ppx_deriving_rpc) Http))) (libraries astring @@ -67,42 +67,6 @@ ) ) -(test - (name bufio_test_run) - (package http-lib) - (modes (best exe)) - (modules bufio_test_run) - (libraries - qcheck-core.runner - bufio_test - ) - ; use fixed seed to avoid causing random failures in CI and package builds - (action (run %{test} -v -bt --seed 42)) -) - -(library - (name bufio_test) - (modes best) - (modules bufio_test) - (libraries - fmt - mtime - mtime.clock - mtime.clock.os - rresult - http_lib - qcheck-core - xapi_fd_test - ) -) - -(rule - (alias stresstest) - (deps bufio_test_run.exe) - ; use default random seed on stresstests - (action (run %{deps} -v -bt)) -) - (executable (modes exe) (name test_client) diff --git 
a/ocaml/libs/http-lib/test_server.ml b/ocaml/libs/http-lib/test_server.ml index 2cae4f4ba5f..44c07301fd7 100644 --- a/ocaml/libs/http-lib/test_server.ml +++ b/ocaml/libs/http-lib/test_server.ml @@ -16,74 +16,63 @@ let _ = "A simple test HTTP server" ; let open Http_svr in let server = Server.empty () in - Server.add_handler server Http.Get "/stop" - (FdIO - (fun _ s _ -> - let r = Http.Response.to_wire_string (Http.Response.make "200" "OK") in - Unixext.really_write_string s r ; - with_lock finished_m (fun () -> - finished := true ; - Condition.signal finished_c - ) - ) - ) ; - Server.add_handler server Http.Post "/echo" - (FdIO - (fun request s _ -> - match request.Http.Request.content_length with - | None -> - Unixext.really_write_string s - (Http.Response.to_wire_string - (Http.Response.make "404" "content length missing") - ) - | Some l -> - let txt = Unixext.really_read_string s (Int64.to_int l) in - let r = - Http.Response.to_wire_string - (Http.Response.make ~body:txt "200" "OK") - in - Unixext.really_write_string s r - ) - ) ; - Server.add_handler server Http.Get "/stats" - (FdIO - (fun _ s _ -> - let lines = - List.map - (fun (m, uri, s) -> - Printf.sprintf "%s,%s,%d,%d\n" - (Http.string_of_method_t m) - uri s.Http_svr.Stats.n_requests s.Http_svr.Stats.n_connections - ) - (Server.all_stats server) - in - let txt = String.concat "" lines in - let r = - Http.Response.to_wire_string (Http.Response.make ~body:txt "200" "OK") - in - Unixext.really_write_string s r - ) - ) ; - Server.add_handler server Http.Get "/query" - (FdIO - (fun request s _ -> - match request.Http.Request.query with - | (_, v) :: _ -> - Unixext.really_write_string s - (Http.Response.to_wire_string - (Http.Response.make ~body:v "200" "OK") - ) - | _ -> - Unixext.really_write_string s - (Http.Response.to_wire_string - (Http.Response.make "404" "Query string missing") - ) - ) - ) ; + Server.add_handler server Http.Get "/stop" (fun _ s _ -> + let r = Http.Response.to_wire_string (Http.Response.make "200" "OK") in + Unixext.really_write_string s r ; + with_lock finished_m (fun () -> + finished := true ; + Condition.signal finished_c + ) + ) ; + Server.add_handler server Http.Post "/echo" (fun request s _ -> + match request.Http.Request.content_length with + | None -> + Unixext.really_write_string s + (Http.Response.to_wire_string + (Http.Response.make "404" "content length missing") + ) + | Some l -> + let txt = Unixext.really_read_string s (Int64.to_int l) in + let r = + Http.Response.to_wire_string + (Http.Response.make ~body:txt "200" "OK") + in + Unixext.really_write_string s r + ) ; + Server.add_handler server Http.Get "/stats" (fun _ s _ -> + let lines = + List.map + (fun (m, uri, s) -> + Printf.sprintf "%s,%s,%d,%d\n" + (Http.string_of_method_t m) + uri s.Http_svr.Stats.n_requests s.Http_svr.Stats.n_connections + ) + (Server.all_stats server) + in + let txt = String.concat "" lines in + let r = + Http.Response.to_wire_string (Http.Response.make ~body:txt "200" "OK") + in + Unixext.really_write_string s r + ) ; + Server.add_handler server Http.Get "/query" (fun request s _ -> + match request.Http.Request.query with + | (_, v) :: _ -> + Unixext.really_write_string s + (Http.Response.to_wire_string + (Http.Response.make ~body:v "200" "OK") + ) + | _ -> + Unixext.really_write_string s + (Http.Response.to_wire_string + (Http.Response.make "404" "Query string missing") + ) + ) ; (* Forces a protocol error by closing the connection without sending a proper http reponse code *) - Server.add_handler server Http.Get 
"/close_conn" - (FdIO (fun _ _ _ -> raise End_of_file)) ; + Server.add_handler server Http.Get "/close_conn" (fun _ _ _ -> + raise End_of_file + ) ; let ip = "0.0.0.0" in let inet_addr = Unix.inet_addr_of_string ip in let addr = Unix.ADDR_INET (inet_addr, !port) in diff --git a/ocaml/quicktest/dune b/ocaml/quicktest/dune index ac0bc21c193..1babfb7d1bb 100644 --- a/ocaml/quicktest/dune +++ b/ocaml/quicktest/dune @@ -21,7 +21,6 @@ rrdd_libs stunnel unixext_test - bufio_test test_timer threads.posix unix diff --git a/ocaml/quicktest/quicktest.ml b/ocaml/quicktest/quicktest.ml index 38a139666ae..f4f8309ec34 100644 --- a/ocaml/quicktest/quicktest.ml +++ b/ocaml/quicktest/quicktest.ml @@ -15,11 +15,7 @@ (** The main entry point of the quicktest executable *) let qchecks = - [ - ("unixext", Unixext_test.tests) - ; ("bufio", Bufio_test.tests) - ; ("Timer", Test_timer.tests) - ] + [("unixext", Unixext_test.tests); ("Timer", Test_timer.tests)] |> List.map @@ fun (name, test) -> (name, List.map QCheck_alcotest.(to_alcotest ~long:true) test) From 99b8ad607c0d15fbe1c0db09671e12c9f5fc2f82 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Mon, 21 Oct 2024 15:27:24 +0100 Subject: [PATCH 135/141] CA-400860: make CPU and netdev RRDD plugins pick up changes in domains When these metrics were collected internally, Xenctrl was queried every 5 seconds. After being split into plugins, they started querying domains (and other information) only on startup, so couldn't pick up new VMs and report their metrics without restarting. Signed-off-by: Andrii Sultanov --- ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml | 7 ++++--- ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml | 20 +++++++++---------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml index 8b56119a76e..7a0db5ec5d7 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-cpu/rrdp_cpu.ml @@ -226,7 +226,8 @@ let dss_hostload xc domains = ) ] -let generate_cpu_ds_list xc domains () = +let generate_cpu_ds_list xc () = + let _, domains, _ = Xenctrl_lib.domain_snapshot xc in dss_pcpus xc @ dss_vcpus xc domains @ dss_loadavg () @ dss_hostload xc domains let _ = @@ -236,8 +237,8 @@ let _ = (* Share one page per PCPU and dom each *) let physinfo = Xenctrl.physinfo xc in let shared_page_count = physinfo.Xenctrl.nr_cpus + List.length domains in - + (* TODO: Can run out of pages if a lot of domains are added at runtime *) Process.main_loop ~neg_shift:0.5 ~target:(Reporter.Local shared_page_count) ~protocol:Rrd_interface.V2 - ~dss_f:(generate_cpu_ds_list xc domains) + ~dss_f:(generate_cpu_ds_list xc) ) diff --git a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml index 5b138aebbe0..299bb9a97df 100644 --- a/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml +++ b/ocaml/xcp-rrdd/bin/rrdp-netdev/rrdp_netdev.ml @@ -131,7 +131,11 @@ let transform_taps devs = ) newdevnames -let generate_netdev_dss doms () = +let generate_netdev_dss () = + let _, doms, _ = + Xenctrl.with_intf (fun xc -> Xenctrl_lib.domain_snapshot xc) + in + let uuid_of_domid domains domid = let _, uuid, _ = try List.find (fun (_, _, domid') -> domid = domid') domains @@ -265,12 +269,8 @@ let generate_netdev_dss doms () = @ dss let _ = - Xenctrl.with_intf (fun xc -> - let _, domains, _ = Xenctrl_lib.domain_snapshot xc in - Process.initialise () ; - (* Share one page per virtual NIC - documentation specifies max is 512 *) - let shared_page_count = 512 in - 
Process.main_loop ~neg_shift:0.5 - ~target:(Reporter.Local shared_page_count) ~protocol:Rrd_interface.V2 - ~dss_f:(generate_netdev_dss domains) - ) + Process.initialise () ; + (* Share one page per virtual NIC - documentation specifies max is 512 *) + let shared_page_count = 512 in + Process.main_loop ~neg_shift:0.5 ~target:(Reporter.Local shared_page_count) + ~protocol:Rrd_interface.V2 ~dss_f:generate_netdev_dss From c64798513519132019b28fb50ca6c7f07a445587 Mon Sep 17 00:00:00 2001 From: Vincent Liu Date: Mon, 30 Sep 2024 14:51:55 +0100 Subject: [PATCH 136/141] CP-51683: Make Cluster_health non-exp feature Remove all the gating on cluster_health enabled as an experimental feature now that it is enabled by default. Signed-off-by: Vincent Liu --- ocaml/xapi/xapi_cluster.ml | 23 +++++++--------- ocaml/xapi/xapi_cluster_helpers.ml | 43 +++++++++++++----------------- ocaml/xapi/xapi_cluster_host.ml | 34 ++++++++++------------- ocaml/xapi/xapi_clustering.ml | 28 +++++++++---------- 4 files changed, 54 insertions(+), 74 deletions(-) diff --git a/ocaml/xapi/xapi_cluster.ml b/ocaml/xapi/xapi_cluster.ml index 355bf175527..498a0ea4111 100644 --- a/ocaml/xapi/xapi_cluster.ml +++ b/ocaml/xapi/xapi_cluster.ml @@ -65,15 +65,12 @@ let create ~__context ~pIF ~cluster_stack ~pool_auto_join ~token_timeout let hostuuid = Inventory.lookup Inventory._installation_uuid in let hostname = Db.Host.get_hostname ~__context ~self:host in let member = - if Xapi_cluster_helpers.cluster_health_enabled ~__context then - Extended - { - ip= Ipaddr.of_string_exn (ipstr_of_address ip_addr) - ; hostuuid - ; hostname - } - else - IPv4 (ipstr_of_address ip_addr) + Extended + { + ip= Ipaddr.of_string_exn (ipstr_of_address ip_addr) + ; hostuuid + ; hostname + } in let token_timeout_ms = Int64.of_float (token_timeout *. 
1000.0) in let token_timeout_coefficient_ms = @@ -298,8 +295,6 @@ let pool_resync ~__context ~self:_ = find or create a matching cluster_host which is also enabled *) let cstack_sync ~__context ~self = - if Xapi_cluster_helpers.cluster_health_enabled ~__context then ( - debug "%s: sync db data with cluster stack" __FUNCTION__ ; - Watcher.on_corosync_update ~__context ~cluster:self - ["Updates due to cluster api calls"] - ) + debug "%s: sync db data with cluster stack" __FUNCTION__ ; + Watcher.on_corosync_update ~__context ~cluster:self + ["Updates due to cluster api calls"] diff --git a/ocaml/xapi/xapi_cluster_helpers.ml b/ocaml/xapi/xapi_cluster_helpers.ml index b46389f8a86..2582790e929 100644 --- a/ocaml/xapi/xapi_cluster_helpers.ml +++ b/ocaml/xapi/xapi_cluster_helpers.ml @@ -104,11 +104,6 @@ let with_cluster_operation ~__context ~(self : [`Cluster] API.Ref.t) ~doc ~op with _ -> () ) -let cluster_health_enabled ~__context = - let pool = Helpers.get_pool ~__context in - let restrictions = Db.Pool.get_restrictions ~__context ~self:pool in - List.assoc_opt "restrict_cluster_health" restrictions = Some "false" - let corosync3_enabled ~__context = let pool = Helpers.get_pool ~__context in let restrictions = Db.Pool.get_restrictions ~__context ~self:pool in @@ -147,23 +142,21 @@ let maybe_generate_alert ~__context ~num_hosts ~hosts_left ~hosts_joined ~quorum ~cls:`Host ~obj_uuid:host_uuid ~body ) in - if cluster_health_enabled ~__context then ( - List.iter (generate_alert false) hosts_left ; - List.iter (generate_alert true) hosts_joined ; - (* only generate this alert when the number of hosts is decreasing *) - if hosts_left <> [] && num_hosts <= quorum then - let pool = Helpers.get_pool ~__context in - let pool_uuid = Db.Pool.get_uuid ~__context ~self:pool in - let name, priority = Api_messages.cluster_quorum_approaching_lost in - let body = - Printf.sprintf - "The cluster is losing quorum: currently %d host(s), need %d host(s) \ - for a quorum" - num_hosts quorum - in - Helpers.call_api_functions ~__context (fun rpc session_id -> - ignore - @@ Client.Client.Message.create ~rpc ~session_id ~name ~priority - ~cls:`Pool ~obj_uuid:pool_uuid ~body - ) - ) + List.iter (generate_alert false) hosts_left ; + List.iter (generate_alert true) hosts_joined ; + (* only generate this alert when the number of hosts is decreasing *) + if hosts_left <> [] && num_hosts <= quorum then + let pool = Helpers.get_pool ~__context in + let pool_uuid = Db.Pool.get_uuid ~__context ~self:pool in + let name, priority = Api_messages.cluster_quorum_approaching_lost in + let body = + Printf.sprintf + "The cluster is losing quorum: currently %d host(s), need %d host(s) \ + for a quorum" + num_hosts quorum + in + Helpers.call_api_functions ~__context (fun rpc session_id -> + ignore + @@ Client.Client.Message.create ~rpc ~session_id ~name ~priority + ~cls:`Pool ~obj_uuid:pool_uuid ~body + ) diff --git a/ocaml/xapi/xapi_cluster_host.ml b/ocaml/xapi/xapi_cluster_host.ml index 9644ca8cd78..e022f75c706 100644 --- a/ocaml/xapi/xapi_cluster_host.ml +++ b/ocaml/xapi/xapi_cluster_host.ml @@ -126,15 +126,12 @@ let join_internal ~__context ~self = let host = Db.Cluster_host.get_host ~__context ~self in let hostname = Db.Host.get_hostname ~__context ~self:host in let member = - if Xapi_cluster_helpers.cluster_health_enabled ~__context then - Extended - { - ip= Ipaddr.of_string_exn (ipstr_of_address ip_addr) - ; hostuuid - ; hostname - } - else - IPv4 (ipstr_of_address ip_addr) + Extended + { + ip= Ipaddr.of_string_exn (ipstr_of_address 
ip_addr) + ; hostuuid + ; hostname + } in let ip_list = List.filter_map @@ -341,17 +338,14 @@ let enable ~__context ~self = let hostuuid = Inventory.lookup Inventory._installation_uuid in let hostname = Db.Host.get_hostname ~__context ~self:host in let member = - if Xapi_cluster_helpers.cluster_health_enabled ~__context then - Cluster_interface.( - Extended - { - ip= Ipaddr.of_string_exn (ipstr_of_address ip_addr) - ; hostuuid - ; hostname - } - ) - else - Cluster_interface.(IPv4 (ipstr_of_address ip_addr)) + Cluster_interface.( + Extended + { + ip= Ipaddr.of_string_exn (ipstr_of_address ip_addr) + ; hostuuid + ; hostname + } + ) in let cluster_ref = Db.Cluster_host.get_cluster ~__context ~self in let cluster_stack = diff --git a/ocaml/xapi/xapi_clustering.ml b/ocaml/xapi/xapi_clustering.ml index ec6efe81d00..d2b61be2f55 100644 --- a/ocaml/xapi/xapi_clustering.ml +++ b/ocaml/xapi/xapi_clustering.ml @@ -675,21 +675,19 @@ module Watcher = struct let is_master = Helpers.is_pool_master ~__context ~host in let daemon_enabled = Daemon.is_enabled () in if is_master && daemon_enabled then ( - if Xapi_cluster_helpers.cluster_health_enabled ~__context then - if Atomic.compare_and_set cluster_change_watcher false true then ( - debug "%s: create watcher for corosync-notifyd on coordinator" - __FUNCTION__ ; - Atomic.set finish_watch false ; - let _ : Thread.t = - Thread.create (fun () -> watch_cluster_change ~__context ~host) () - in - () - ) else - (* someone else must have gone into the if branch above and created the thread - before us, leave it to them *) - debug - "%s: not create watcher for corosync-notifyd as it already exists" - __FUNCTION__ ; + if Atomic.compare_and_set cluster_change_watcher false true then ( + debug "%s: create watcher for corosync-notifyd on coordinator" + __FUNCTION__ ; + Atomic.set finish_watch false ; + let _ : Thread.t = + Thread.create (fun () -> watch_cluster_change ~__context ~host) () + in + () + ) else + (* someone else must have gone into the if branch above and created the thread + before us, leave it to them *) + debug "%s: not create watcher for corosync-notifyd as it already exists" + __FUNCTION__ ; if Xapi_cluster_helpers.corosync3_enabled ~__context then if Atomic.compare_and_set cluster_stack_watcher false true then ( From 264558d13e6ee558622298a851e5422b8747fb91 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Thu, 29 Aug 2024 13:28:27 +0100 Subject: [PATCH 137/141] CA-398341: Populate fingerprints of CA certificates on startup SHA256 and SHA1 certificates' fingerprints do not get populated when the database is upgraded, so empty values need to be detected and amended on startup. 
Signed-off-by: Pau Ruiz Safont Signed-off-by: Steven Woods --- ocaml/idl/datamodel_common.ml | 2 +- ocaml/xapi/xapi_db_upgrade.ml | 62 +++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/ocaml/idl/datamodel_common.ml b/ocaml/idl/datamodel_common.ml index 7c557282259..3fb163cc961 100644 --- a/ocaml/idl/datamodel_common.ml +++ b/ocaml/idl/datamodel_common.ml @@ -10,7 +10,7 @@ open Datamodel_roles to leave a gap for potential hotfixes needing to increment the schema version.*) let schema_major_vsn = 5 -let schema_minor_vsn = 782 +let schema_minor_vsn = 783 (* Historical schema versions just in case this is useful later *) let rio_schema_major_vsn = 5 diff --git a/ocaml/xapi/xapi_db_upgrade.ml b/ocaml/xapi/xapi_db_upgrade.ml index b9ecf94ba01..f4102782916 100644 --- a/ocaml/xapi/xapi_db_upgrade.ml +++ b/ocaml/xapi/xapi_db_upgrade.ml @@ -904,6 +904,67 @@ let upgrade_update_guidance = ) } +let upgrade_ca_fingerprints = + { + description= "Upgrade the fingerprint fields for ca certificates" + ; version= (fun x -> x < (5, 783)) + ; (* the version where we started updating missing fingerprint_sha256 + and fingerprint_sha1 fields for ca certs *) + fn= + (fun ~__context -> + let expr = + let open Xapi_database.Db_filter_types in + And + ( Or + ( Eq (Field "fingerprint_sha256", Literal "") + , Eq (Field "fingerprint_sha1", Literal "") + ) + , Eq (Field "type", Literal "ca") + ) + in + let empty = Db.Certificate.get_records_where ~__context ~expr in + List.iter + (fun (self, record) -> + let read_fingerprints filename = + let ( let* ) = Result.bind in + try + let* certificate = + Xapi_stdext_unix.Unixext.string_of_file filename + |> Cstruct.of_string + |> X509.Certificate.decode_pem + in + let sha1 = + Certificates.pp_fingerprint ~hash_type:`SHA1 certificate + in + let sha256 = + Certificates.pp_fingerprint ~hash_type:`SHA256 certificate + in + Ok (sha1, sha256) + with + | Unix.Unix_error (Unix.ENOENT, _, _) -> + Error + (`Msg (Printf.sprintf "filename %s does not exist" filename)) + | exn -> + Error (`Msg (Printexc.to_string exn)) + in + let filename = + Filename.concat + !Xapi_globs.trusted_certs_dir + record.API.certificate_name + in + match read_fingerprints filename with + | Ok (sha1, sha256) -> + Db.Certificate.set_fingerprint_sha1 ~__context ~self ~value:sha1 ; + Db.Certificate.set_fingerprint_sha256 ~__context ~self + ~value:sha256 + | Error (`Msg msg) -> + D.info "%s: ignoring error when reading CA certificate %s: %s" + __FUNCTION__ record.API.certificate_name msg + ) + empty + ) + } + let rules = [ upgrade_domain_type @@ -933,6 +994,7 @@ let rules = ; remove_legacy_ssl_support ; empty_pool_uefi_certificates ; upgrade_update_guidance + ; upgrade_ca_fingerprints ] (* Maybe upgrade most recent db *) From ed90086a38a0d1d352546da9ec227fbbfc391106 Mon Sep 17 00:00:00 2001 From: Steven Woods Date: Tue, 17 Sep 2024 17:06:23 +0100 Subject: [PATCH 138/141] CP-51527: Add --force option to pool-uninstall-ca-certificate This allows the CA certificate to be removed from the DB even if the certificate file does not exist. 
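A possible invocation, following the '--force' syntax described in the new xe help text (the certificate name below is only a placeholder):

```
$ xe pool-uninstall-ca-certificate name=<certificate-name> --force
```
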
Signed-off-by: Steven Woods --- ocaml/idl/datamodel_host.ml | 34 ++++++++++++++++++++--- ocaml/idl/datamodel_pool.ml | 36 +++++++++++++++++++++++-- ocaml/xapi-cli-server/cli_frontend.ml | 7 +++-- ocaml/xapi-cli-server/cli_operations.ml | 3 ++- ocaml/xapi/certificates.ml | 30 ++++++++++++--------- ocaml/xapi/certificates.mli | 5 ++-- ocaml/xapi/message_forwarding.ml | 13 +++++---- ocaml/xapi/xapi_host.ml | 6 ++--- ocaml/xapi/xapi_host.mli | 2 +- ocaml/xapi/xapi_pool.ml | 8 +++--- ocaml/xapi/xapi_pool.mli | 3 ++- 11 files changed, 111 insertions(+), 36 deletions(-) diff --git a/ocaml/idl/datamodel_host.ml b/ocaml/idl/datamodel_host.ml index 266d695fa34..b0fb9a6aace 100644 --- a/ocaml/idl/datamodel_host.ml +++ b/ocaml/idl/datamodel_host.ml @@ -1479,12 +1479,40 @@ let install_ca_certificate = let uninstall_ca_certificate = call ~pool_internal:true ~hide_from_docs:true ~name:"uninstall_ca_certificate" ~doc:"Remove a TLS CA certificate from this host." - ~params: + ~versioned_params: [ - (Ref _host, "host", "The host"); (String, "name", "The certificate name") + { + param_type= Ref _host + ; param_name= "host" + ; param_doc= "The host" + ; param_release= numbered_release "1.290.0" + ; param_default= None + } + ; { + param_type= String + ; param_name= "name" + ; param_doc= "The certificate name" + ; param_release= numbered_release "1.290.0" + ; param_default= None + } + ; { + param_type= Bool + ; param_name= "force" + ; param_doc= "Remove the DB entry even if the file is non-existent" + ; param_release= numbered_release "24.35.0" + ; param_default= Some (VBool false) + } ] ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~lifecycle:[(Published, "1.290.0", "Uninstall TLS CA certificate")] + ~lifecycle: + [ + (Published, "1.290.0", "Uninstall TLS CA certificate") + ; ( Changed + , "24.35.0" + , "Added --force option to allow DB entries to be removed for \ + non-existent files" + ) + ] () let certificate_list = diff --git a/ocaml/idl/datamodel_pool.ml b/ocaml/idl/datamodel_pool.ml index f94a531903c..ab0d1669788 100644 --- a/ocaml/idl/datamodel_pool.ml +++ b/ocaml/idl/datamodel_pool.ml @@ -851,9 +851,41 @@ let certificate_uninstall = let uninstall_ca_certificate = call ~name:"uninstall_ca_certificate" ~doc:"Remove a pool-wide TLS CA certificate." 
- ~params:[(String, "name", "The certificate name")] + ~params: + [ + (String, "name", "The certificate name") + ; ( Bool + , "force" + , "If true, remove the DB entry even if the file is non-existent" + ) + ] + ~versioned_params: + [ + { + param_type= String + ; param_name= "name" + ; param_doc= "The certificate name" + ; param_release= numbered_release "1.290.0" + ; param_default= None + } + ; { + param_type= Bool + ; param_name= "force" + ; param_doc= "Remove the DB entry even if the file is non-existent" + ; param_release= numbered_release "24.35.0" + ; param_default= Some (VBool false) + } + ] ~allowed_roles:(_R_POOL_OP ++ _R_CLIENT_CERT) - ~lifecycle:[(Published, "1.290.0", "Uninstall TLS CA certificate")] + ~lifecycle: + [ + (Published, "1.290.0", "Uninstall TLS CA certificate") + ; ( Changed + , "24.35.0" + , "Added --force option to allow DB entries to be removed for \ + non-existent files" + ) + ] () let certificate_list = diff --git a/ocaml/xapi-cli-server/cli_frontend.ml b/ocaml/xapi-cli-server/cli_frontend.ml index 881d016267a..3de231f3cad 100644 --- a/ocaml/xapi-cli-server/cli_frontend.ml +++ b/ocaml/xapi-cli-server/cli_frontend.ml @@ -396,8 +396,11 @@ let rec cmdtable_data : (string * cmd_spec) list = ; ( "pool-uninstall-ca-certificate" , { reqd= ["name"] - ; optn= [] - ; help= "Uninstall a pool-wide TLS CA certificate." + ; optn= ["force"] + ; help= + "Uninstall a pool-wide TLS CA certificate. The optional parameter \ + '--force' will remove the DB entry even if the certificate file is \ + non-existent" ; implementation= No_fd Cli_operations.pool_uninstall_ca_certificate ; flags= [] } diff --git a/ocaml/xapi-cli-server/cli_operations.ml b/ocaml/xapi-cli-server/cli_operations.ml index aa3bf08c05a..1e8ba0f3b37 100644 --- a/ocaml/xapi-cli-server/cli_operations.ml +++ b/ocaml/xapi-cli-server/cli_operations.ml @@ -1770,7 +1770,8 @@ let pool_install_ca_certificate fd _printer rpc session_id params = let pool_uninstall_ca_certificate _printer rpc session_id params = let name = List.assoc "name" params in - Client.Pool.uninstall_ca_certificate ~rpc ~session_id ~name + let force = get_bool_param params "force" in + Client.Pool.uninstall_ca_certificate ~rpc ~session_id ~name ~force let pool_certificate_list printer rpc session_id _params = printer (Cli_printer.PList (Client.Pool.certificate_list ~rpc ~session_id)) diff --git a/ocaml/xapi/certificates.ml b/ocaml/xapi/certificates.ml index 2ae9e72aebe..9a6298c129d 100644 --- a/ocaml/xapi/certificates.ml +++ b/ocaml/xapi/certificates.ml @@ -304,17 +304,21 @@ let host_install kind ~name ~cert = (ExnHelper.string_of_exn e) ; raise_library_corrupt () -let host_uninstall kind ~name = +let host_uninstall kind ~name ~force = validate_name kind name ; let filename = library_filename kind name in - if not (Sys.file_exists filename) then - raise_does_not_exist kind name ; - debug "Uninstalling %s %s" (to_string kind) name ; - try Sys.remove filename ; update_ca_bundle () - with e -> - warn "Exception uninstalling %s %s: %s" (to_string kind) name - (ExnHelper.string_of_exn e) ; - raise_corrupt kind name + if Sys.file_exists filename then ( + debug "Uninstalling %s %s" (to_string kind) name ; + try Sys.remove filename ; update_ca_bundle () + with e -> + warn "Exception uninstalling %s %s: %s" (to_string kind) name + (ExnHelper.string_of_exn e) ; + raise_corrupt kind name + ) else if force then + info "Certificate file %s is non-existent but ignoring this due to force." 
+ name + else + raise_does_not_exist kind name let get_cert kind name = validate_name kind name ; @@ -367,6 +371,7 @@ let sync_certs kind ~__context master_certs host = ) (fun rpc session_id host name -> Client.Host.uninstall_ca_certificate ~rpc ~session_id ~host ~name + ~force:false ) ~__context master_certs host | CRL -> @@ -403,15 +408,16 @@ let pool_install kind ~__context ~name ~cert = host_install kind ~name ~cert ; try pool_sync ~__context with exn -> - ( try host_uninstall kind ~name + ( try host_uninstall kind ~name ~force:false with e -> warn "Exception unwinding install of %s %s: %s" (to_string kind) name (ExnHelper.string_of_exn e) ) ; raise exn -let pool_uninstall kind ~__context ~name = - host_uninstall kind ~name ; pool_sync ~__context +let pool_uninstall kind ~__context ~name ~force = + host_uninstall kind ~name ~force ; + pool_sync ~__context (* Extracts the server certificate from the server certificate pem file. It strips the private key as well as the rest of the certificate chain. *) diff --git a/ocaml/xapi/certificates.mli b/ocaml/xapi/certificates.mli index 486ada825e2..064c7e47e31 100644 --- a/ocaml/xapi/certificates.mli +++ b/ocaml/xapi/certificates.mli @@ -53,12 +53,13 @@ val install_server_certificate : val host_install : t_trusted -> name:string -> cert:string -> unit -val host_uninstall : t_trusted -> name:string -> unit +val host_uninstall : t_trusted -> name:string -> force:bool -> unit val pool_install : t_trusted -> __context:Context.t -> name:string -> cert:string -> unit -val pool_uninstall : t_trusted -> __context:Context.t -> name:string -> unit +val pool_uninstall : + t_trusted -> __context:Context.t -> name:string -> force:bool -> unit (* Database manipulation *) diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index c85dc2cb025..17ff3de0261 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -3745,19 +3745,22 @@ functor ~cert ) - let uninstall_ca_certificate ~__context ~host ~name = - info "Host.uninstall_ca_certificate: host = '%s'; name = '%s'" + let uninstall_ca_certificate ~__context ~host ~name ~force = + info + "Host.uninstall_ca_certificate: host = '%s'; name = '%s'; force = \ + '%b'" (host_uuid ~__context host) - name ; - let local_fn = Local.Host.uninstall_ca_certificate ~host ~name in + name force ; + let local_fn = Local.Host.uninstall_ca_certificate ~host ~name ~force in do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.uninstall_ca_certificate ~rpc ~session_id ~host ~name + ~force ) (* legacy names *) let certificate_install = install_ca_certificate - let certificate_uninstall = uninstall_ca_certificate + let certificate_uninstall = uninstall_ca_certificate ~force:false let certificate_list ~__context ~host = info "Host.certificate_list: host = '%s'" (host_uuid ~__context host) ; diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index 32139f79896..7958a15a367 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -1548,9 +1548,9 @@ let install_ca_certificate ~__context ~host:_ ~name ~cert = (* don't modify db - Pool.install_ca_certificate will handle that *) Certificates.(host_install CA_Certificate ~name ~cert) -let uninstall_ca_certificate ~__context ~host:_ ~name = +let uninstall_ca_certificate ~__context ~host:_ ~name ~force = (* don't modify db - Pool.uninstall_ca_certificate will handle that *) - Certificates.(host_uninstall CA_Certificate ~name) + Certificates.(host_uninstall CA_Certificate ~name ~force) let 
certificate_list ~__context ~host:_ = Certificates.(local_list CA_Certificate) @@ -1559,7 +1559,7 @@ let crl_install ~__context ~host:_ ~name ~crl = Certificates.(host_install CRL ~name ~cert:crl) let crl_uninstall ~__context ~host:_ ~name = - Certificates.(host_uninstall CRL ~name) + Certificates.(host_uninstall CRL ~name ~force:false) let crl_list ~__context ~host:_ = Certificates.(local_list CRL) diff --git a/ocaml/xapi/xapi_host.mli b/ocaml/xapi/xapi_host.mli index 8813f037b19..c303ee69597 100644 --- a/ocaml/xapi/xapi_host.mli +++ b/ocaml/xapi/xapi_host.mli @@ -290,7 +290,7 @@ val install_ca_certificate : __context:Context.t -> host:API.ref_host -> name:string -> cert:string -> unit val uninstall_ca_certificate : - __context:Context.t -> host:API.ref_host -> name:string -> unit + __context:Context.t -> host:API.ref_host -> name:string -> force:bool -> unit val certificate_list : __context:'a -> host:'b -> string list diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index e3aca23e47b..044507bc9c2 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -1432,12 +1432,12 @@ let certificate_install ~__context ~name ~cert = let install_ca_certificate = certificate_install -let certificate_uninstall ~__context ~name = +let uninstall_ca_certificate ~__context ~name ~force = let open Certificates in - pool_uninstall CA_Certificate ~__context ~name ; + pool_uninstall CA_Certificate ~__context ~name ~force ; Db_util.remove_ca_cert_by_name ~__context name -let uninstall_ca_certificate = certificate_uninstall +let certificate_uninstall = uninstall_ca_certificate ~force:false let certificate_list ~__context = let open Certificates in @@ -1446,7 +1446,7 @@ let certificate_list ~__context = let crl_install = Certificates.(pool_install CRL) -let crl_uninstall = Certificates.(pool_uninstall CRL) +let crl_uninstall = Certificates.(pool_uninstall CRL ~force:false) let crl_list ~__context = Certificates.(local_list CRL) diff --git a/ocaml/xapi/xapi_pool.mli b/ocaml/xapi/xapi_pool.mli index 0bd71a22996..835a356f782 100644 --- a/ocaml/xapi/xapi_pool.mli +++ b/ocaml/xapi/xapi_pool.mli @@ -248,7 +248,8 @@ val install_ca_certificate : val certificate_uninstall : __context:Context.t -> name:string -> unit -val uninstall_ca_certificate : __context:Context.t -> name:string -> unit +val uninstall_ca_certificate : + __context:Context.t -> name:string -> force:bool -> unit val certificate_list : __context:Context.t -> string list From 98384e8f2d120e8cffead3c032152b202cc6fbd4 Mon Sep 17 00:00:00 2001 From: Andrii Sultanov Date: Wed, 23 Oct 2024 08:39:12 +0100 Subject: [PATCH 139/141] CA-400924 - networkd: Add bonds to `devs` in network_monitor_thread Without it, stats for bond's interfaces are not identified correctly. 
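As a minimal stand-alone sketch of what the new `add_bonds` helper does (the bond and NIC names below are invented for illustration only):

```
(* prepend the bond master names so their stats are matched like any other dev *)
let add_bonds bonds devs = List.map fst bonds @ devs

let () =
  let bonds = [("bond0", ["eth0"; "eth1"])] in
  let devs = ["eth0"; "eth1"; "eth2"] in
  assert (add_bonds bonds devs = ["bond0"; "eth0"; "eth1"; "eth2"])
```
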
Fixes: bd4dda5c294aa51045bf3caccc48bb0870a7d428 (IH-715 - rrdp-netdev: Remove double (de)serialization) Signed-off-by: Andrii Sultanov --- ocaml/networkd/bin/network_monitor_thread.ml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ocaml/networkd/bin/network_monitor_thread.ml b/ocaml/networkd/bin/network_monitor_thread.ml index 9c4b7c3352d..1b15dbe2a42 100644 --- a/ocaml/networkd/bin/network_monitor_thread.ml +++ b/ocaml/networkd/bin/network_monitor_thread.ml @@ -225,7 +225,8 @@ let rec monitor dbg () = let bonds : (string * string list) list = Network_server.Bridge.get_all_bonds dbg from_cache in - let devs = get_link_stats () |> get_stats bonds in + let add_bonds bonds devs = List.map fst bonds @ devs in + let devs = get_link_stats () |> add_bonds bonds |> get_stats bonds in ( if List.length bonds <> Hashtbl.length bonds_status then let dead_bonds = Hashtbl.fold From ef071a6e129c8a9cb9afd41bc0b9c69e559d7f0f Mon Sep 17 00:00:00 2001 From: Changlei Li Date: Thu, 24 Oct 2024 14:48:48 +0800 Subject: [PATCH 140/141] CA-400559: API Error too_many_groups is not in go SDK Reason: API errors in go sdk are generated from Api_errors.errors. Error is filled in Api_errors.errors when defined using add_error function. Error too_many_groups is not defined using add_error function. Fix: Define too_many_groups using add_error function. Signed-off-by: Changlei Li --- ocaml/xapi-consts/api_errors.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml index 97880cde57a..ebafbdaa111 100644 --- a/ocaml/xapi-consts/api_errors.ml +++ b/ocaml/xapi-consts/api_errors.ml @@ -1394,4 +1394,4 @@ let telemetry_next_collection_too_late = (* FIPS/CC_PREPARATIONS *) let illegal_in_fips_mode = add_error "ILLEGAL_IN_FIPS_MODE" -let too_many_groups = "TOO_MANY_GROUPS" +let too_many_groups = add_error "TOO_MANY_GROUPS" From 0d265537c3d363b3f3b188ca3d98ca5a411e0100 Mon Sep 17 00:00:00 2001 From: Pau Ruiz Safont Date: Tue, 22 Oct 2024 16:55:25 +0100 Subject: [PATCH 141/141] chore: annotate types for non-returning functions These become warnings in ocaml 5.0+ Signed-off-by: Pau Ruiz Safont --- ocaml/xapi/xapi.ml | 2 +- ocaml/xapi/xapi_ha.ml | 2 +- ocaml/xenopsd/cli/xn.ml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi/xapi.ml b/ocaml/xapi/xapi.ml index ed6323663e3..ca87e740efb 100644 --- a/ocaml/xapi/xapi.ml +++ b/ocaml/xapi/xapi.ml @@ -180,7 +180,7 @@ let init_args () = Xapi_globs.xenopsd_queues := ["xenopsd"] ) -let wait_to_die () = +let wait_to_die () : unit = (* don't call Thread.join cos this interacts strangely with OCAML runtime and stops the OCAML-level signal handlers ever getting called... Thread.delay is fine tho' *) while true do diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index ddfbc357fb2..b6ba195f823 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -130,7 +130,7 @@ let uuid_of_host_address address = let on_master_failure () = (* The plan is: keep asking if I should be the master. If I'm rejected then query the live set and see if someone else has been marked as master, if so become a slave of them. 
*) - let become_master () = + let become_master () : unit = info "This node will become the master" ; Xapi_pool_transition.become_master () ; info "Waiting for server restart" ; diff --git a/ocaml/xenopsd/cli/xn.ml b/ocaml/xenopsd/cli/xn.ml index 4c1251cccbd..a6ed6a884bd 100644 --- a/ocaml/xenopsd/cli/xn.ml +++ b/ocaml/xenopsd/cli/xn.ml @@ -1051,7 +1051,7 @@ let unix_proxy path = | 0 -> let buf = Bytes.make 16384 '\000' in let accept, _ = Unix.accept listen in - let copy a b = + let copy a b : unit = while true do let n = Unix.read a buf 0 (Bytes.length buf) in if n = 0 then exit 0 ;