diff --git a/.asf.yaml b/.asf.yaml
index 507a0ca6dd5e..8c1a5d51fdf1 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -58,6 +58,7 @@ github:
- gpordeus
- hsato03
- bernardodemarco
+ - abh1sar
protected_branches: ~
diff --git a/.github/linters/.flake8 b/.github/linters/.flake8
index f250719ca198..3364ad14f290 100644
--- a/.github/linters/.flake8
+++ b/.github/linters/.flake8
@@ -22,8 +22,11 @@
# E224 Tab after operator
# E227 Missing whitespace around bitwise or shift operator
# E242 Tab after ','
+# E271 Multiple spaces after keyword
+# E272 Multiple spaces before keyword
# E273 Tab after keyword
# E274 Tab before keyword
+# E713 Test for membership should be 'not in'
# E742 Do not define classes named 'I', 'O', or 'l'
# E743 Do not define functions named 'I', 'O', or 'l'
# E901 SyntaxError or IndentationError
@@ -37,4 +40,4 @@
exclude =
.git,
venv
-select = E112,E113,E133,E223,E224,E227,E242,E273,E274,E742,E743,E901,E902,W291,W292,W293,W391
+select = E112,E113,E133,E223,E224,E227,E242,E271,E272,E273,E274,E713,E742,E743,E901,E902,W291,W292,W293,W391
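Note: with E271, E272 and E713 now selected, flake8 will additionally flag extra spacing around keywords and negated membership tests in the Python scripts it covers; for example, a hypothetical check written as "if not tag in host_tags:" would be reported by E713 and should read "if tag not in host_tags:".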
diff --git a/.github/linters/.markdown-lint.yml b/.github/linters/.markdown-lint.yml
new file mode 100644
index 000000000000..df1b1a2825e3
--- /dev/null
+++ b/.github/linters/.markdown-lint.yml
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# MD001/heading-increment Heading levels should only increment by one level at a time
+MD001: false
+
+# MD003/heading-style Heading style
+MD003: false
+
+# MD004/ul-style Unordered list style
+MD004: false
+
+# MD007/ul-indent Unordered list indentation
+MD007: false
+
+# MD009/no-trailing-spaces Trailing spaces
+MD009: false
+
+# MD010/no-hard-tabs Hard tabs
+MD010: false
+
+# MD012/no-multiple-blanks Multiple consecutive blank lines
+MD012: false
+
+# MD013/line-length Line length
+MD013: false
+
+# MD014/commands-show-output Dollar signs used before commands without showing output
+MD014: false
+
+# MD018/no-missing-space-atx No space after hash on atx style heading
+MD018: false
+
+# MD019/no-multiple-space-atx Multiple spaces after hash on atx style heading
+MD019: false
+
+# MD022/blanks-around-headings Headings should be surrounded by blank lines
+MD022: false
+
+# MD023/heading-start-left Headings must start at the beginning of the line
+MD023: false
+
+# MD024/no-duplicate-heading Multiple headings with the same content
+MD024: false
+
+# MD025/single-title/single-h1 Multiple top-level headings in the same document
+MD025: false
+
+# MD026/no-trailing-punctuation Trailing punctuation in heading
+MD026: false
+
+# MD028/no-blanks-blockquote Blank line inside blockquote
+MD028: false
+
+# MD029/ol-prefix Ordered list item prefix
+MD029: false
+
+# MD031/blanks-around-fences Fenced code blocks should be surrounded by blank lines
+MD031: false
+
+# MD032/blanks-around-lists Lists should be surrounded by blank lines
+MD032: false
+
+# MD033/no-inline-html Inline HTML
+MD033: false
+
+# MD034/no-bare-urls Bare URL used
+MD034: false
+
+# MD036/no-emphasis-as-heading Emphasis used instead of a heading
+MD036: false
+
+# MD037/no-space-in-emphasis Spaces inside emphasis markers
+MD037: false
+
+# MD040/fenced-code-language Fenced code blocks should have a language specified
+MD040: false
+
+# MD041/first-line-heading/first-line-h1 First line in a file should be a top-level heading
+MD041: false
+
+# MD046/code-block-style Code block style
+MD046: false
+
+# MD052/reference-links-images Reference links and images should use a label that is defined
+MD052: false
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fac2d6266fa5..133e2c35b4ec 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -86,7 +86,9 @@ jobs:
smoke/test_migration
smoke/test_multipleips_per_nic
smoke/test_nested_virtualization
- smoke/test_set_sourcenat",
+ smoke/test_set_sourcenat
+ smoke/test_webhook_lifecycle
+ smoke/test_purge_expunged_vms",
"smoke/test_network
smoke/test_network_acl
smoke/test_network_ipv6
@@ -132,6 +134,7 @@ jobs:
smoke/test_usage
smoke/test_usage_events
smoke/test_vm_deployment_planner
+ smoke/test_vm_strict_host_tags
smoke/test_vm_schedule
smoke/test_vm_life_cycle
smoke/test_vm_lifecycle_unmanage_import
diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
index 784df0cf03ca..b6c814a36f4c 100644
--- a/.github/workflows/linter.yml
+++ b/.github/workflows/linter.yml
@@ -39,7 +39,7 @@ jobs:
pip install pre-commit
- name: Set PY
run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
- - uses: actions/cache@v3
+ - uses: actions/cache@v4
with:
path: ~/.cache/pre-commit
key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}
diff --git a/.github/workflows/main-sonar-check.yml b/.github/workflows/main-sonar-check.yml
index 66bb1093e040..07d15583c825 100644
--- a/.github/workflows/main-sonar-check.yml
+++ b/.github/workflows/main-sonar-check.yml
@@ -44,14 +44,14 @@ jobs:
cache: 'maven'
- name: Cache SonarCloud packages
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Cache local Maven repository
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
diff --git a/.github/workflows/sonar-check.yml b/.github/workflows/sonar-check.yml
index 2ebcf1fb2db7..5d1bcc5dc223 100644
--- a/.github/workflows/sonar-check.yml
+++ b/.github/workflows/sonar-check.yml
@@ -46,14 +46,14 @@ jobs:
cache: 'maven'
- name: Cache SonarCloud packages
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Cache local Maven repository
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9004ed9daeea..8736e5bac941 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -36,6 +36,8 @@ repos:
- id: check-vcs-permalinks
#- id: check-yaml
- id: destroyed-symlinks
+ - id: detect-aws-credentials
+ args: [--allow-missing-credentials]
- id: detect-private-key
exclude: >
(?x)
@@ -53,9 +55,13 @@ repos:
- id: end-of-file-fixer
exclude: \.vhd$
#- id: fix-byte-order-marker
+ - id: forbid-submodules
- id: mixed-line-ending
exclude: \.(cs|xml)$
- # - id: trailing-whitespace
+ - id: trailing-whitespace
+ files: \.(header|in|java|md|properties|py|rb|sh|sql|txt|vue|yaml|yml)$
+ args: [--markdown-linebreak-ext=md]
+ exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$
- repo: https://github.com/pycqa/flake8
rev: 7.0.0
hooks:
@@ -72,3 +78,12 @@ repos:
^scripts/vm/hypervisor/xenserver/vmopspremium$|
^setup/bindir/cloud-setup-encryption\.in$|
^venv/.*$
+ - repo: https://github.com/igorshubovych/markdownlint-cli
+ rev: v0.40.0
+ hooks:
+ - id: markdownlint
+ name: run markdownlint
+ description: check Markdown files with markdownlint
+ args: [--config=.github/linters/.markdown-lint.yml]
+ types: [markdown]
+ files: \.(md|mdown|markdown)$
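Note: the new hooks run through pre-commit in CI; assuming pre-commit is installed locally, a single hook can also be exercised across the tree with, for example, "pre-commit run markdownlint --all-files" (or "trailing-whitespace" in place of "markdownlint").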
diff --git a/CHANGES.md b/CHANGES.md
index ef498f8edf0b..9544fc014c75 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -430,11 +430,11 @@ Bug ID | Description
[CLOUDSTACK-6099](https://issues.apache.org/jira/browse/CLOUDSTACK-6099) | live migration is failing for vm deployed using dynamic compute offerings with NPE
[CLOUDSTACK-7528](https://issues.apache.org/jira/browse/CLOUDSTACK-7528) | More verbose logging when sending alert fails
[CLOUDSTACK-6624](https://issues.apache.org/jira/browse/CLOUDSTACK-6624) | set specifyIpRanges to true if specifyVlan is set to true
-[CLOUDSTACK-7404](https://issues.apache.org/jira/browse/CLOUDSTACK-7404) | Failed to start an instance when originating template has been deleted
+[CLOUDSTACK-7404](https://issues.apache.org/jira/browse/CLOUDSTACK-7404) | Failed to start an instance when originating template has been deleted
[CLOUDSTACK-6531](https://issues.apache.org/jira/browse/CLOUDSTACK-6531) | Stopping the router in case of command failures
[CLOUDSTACK-6115](https://issues.apache.org/jira/browse/CLOUDSTACK-6115) | TravisCI configuration
[CLOUDSTACK-7405](https://issues.apache.org/jira/browse/CLOUDSTACK-7405) | allowing VR meta-data to be accessed without trailing slash
-[CLOUDSTACK-7260](https://issues.apache.org/jira/browse/CLOUDSTACK-7260) | Management server not responding after some time for Vmware due to Oom
+[CLOUDSTACK-7260](https://issues.apache.org/jira/browse/CLOUDSTACK-7260) | Management server not responding after some time for Vmware due to Oom
[CLOUDSTACK-7038](https://issues.apache.org/jira/browse/CLOUDSTACK-7038) | Add mysql client dependency for mgmt server pkg for debian
[CLOUDSTACK-6892](https://issues.apache.org/jira/browse/CLOUDSTACK-6892) | Create separate package for the mysql HA component
[CLOUDSTACK-7038](https://issues.apache.org/jira/browse/CLOUDSTACK-7038) | Add mysql client dependency for mgmt server/rpms
@@ -449,12 +449,12 @@ Bug ID | Description
[CLOUDSTACK-7006](https://issues.apache.org/jira/browse/CLOUDSTACK-7006) | Restore template ID in ROOT volume usages
[CLOUDSTACK-6747](https://issues.apache.org/jira/browse/CLOUDSTACK-6747) | test to allow all cidrs on other end of vpc
[CLOUDSTACK-6272](https://issues.apache.org/jira/browse/CLOUDSTACK-6272) | Fix recover/restore VM actions
-[CLOUDSTACK-6927](https://issues.apache.org/jira/browse/CLOUDSTACK-6927) | store virsh list in list instead of querying libvirt
+[CLOUDSTACK-6927](https://issues.apache.org/jira/browse/CLOUDSTACK-6927) | store virsh list in list instead of querying libvirt
[CLOUDSTACK-6317](https://issues.apache.org/jira/browse/CLOUDSTACK-6317) | [VMware] Tagged VLAN support broken for Management/Control/Storage traffic
[CLOUDSTACK-5891](https://issues.apache.org/jira/browse/CLOUDSTACK-5891) | [VMware] If a template has been registered and "cpu.corespersocket=X" ,
[CLOUDSTACK-6478](https://issues.apache.org/jira/browse/CLOUDSTACK-6478) | Failed to download Template when having 3 SSVM's in one
[CLOUDSTACK-6464](https://issues.apache.org/jira/browse/CLOUDSTACK-6464) | if guest network type is vlan://untagged, and traffic label is used
-[CLOUDSTACK-6816](https://issues.apache.org/jira/browse/CLOUDSTACK-6816) | bugfix: cloudstack-setup-management make /root directory's permission 0777
+[CLOUDSTACK-6816](https://issues.apache.org/jira/browse/CLOUDSTACK-6816) | bugfix: cloudstack-setup-management make /root directory's permission 0777
[CLOUDSTACK-6204](https://issues.apache.org/jira/browse/CLOUDSTACK-6204) | applying missed patch
[CLOUDSTACK-6472](https://issues.apache.org/jira/browse/CLOUDSTACK-6472) | (4.3 specific) listUsageRecords: Pull information from removed items as well
[CLOUDSTACK-5976](https://issues.apache.org/jira/browse/CLOUDSTACK-5976) | Typo in "ssh_keypairs" table's foreign key constraints on the Upgraded Setup
@@ -657,11 +657,11 @@ Version 4.1.0
-------------
This is the second major release of CloudStack from within the Apache Software Foundation, and the
-first major release as a Top-Level Project (TLP).
+first major release as a Top-Level Project (TLP).
Build Tool Changes:
- * The project now uses Maven 3 exclusively to build.
+ * The project now uses Maven 3 exclusively to build.
New Features:
* CLOUDSTACK-101: OVS support in KVM
@@ -976,14 +976,14 @@ Issues fixed in this release:
* CLOUDSTACK-1845: KVM - storage migration often fails
* CLOUDSTACK-1846: KVM - storage pools can silently fail to be unregistered, leading to failure to register later
* CLOUDSTACK-2003: Deleting domain while deleted account is cleaning up leaves VMs expunging forever due to 'Failed to update resource count'
-* CLOUDSTACK-2090: Upgrade from version 4.0.1 to version 4.0.2 triggers the 4.0.0 to 4.0.1.
+* CLOUDSTACK-2090: Upgrade from version 4.0.1 to version 4.0.2 triggers the 4.0.0 to 4.0.1.
* CLOUDSTACK-2091: Error in API documentation for 4.0.x.
Version 4.0.1-incubating
------------------------
-This is a bugfix release for Apache CloudStack 4.0.0-incubating, with no new features.
+This is a bugfix release for Apache CloudStack 4.0.0-incubating, with no new features.
Security Fixes:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cdfbfe77b7ed..bb84e4e91fb3 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,7 +3,7 @@ Contributing to Apache CloudStack (ACS)
Summary
-------
-This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions.
+This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions.
These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project and you will submit a Pull Request for your changes to be added.
_Lets get started!!!_
@@ -11,17 +11,17 @@ _Lets get started!!!_
Bug fixes
---------
-It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches.
-Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch.
-This can be either the "current release" or the "previous release", depending on which ones are maintained.
+It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches.
+Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch.
+This can be either the "current release" or the "previous release", depending on which ones are maintained.
Since the goal is a stable main, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> main (in other words: old to new)
Developing new features
-----------------------
-Development should be done in a feature branch, branched off of main.
-Send a PR(steps below) to get it into main (2x LGTM applies).
-PR will only be merged when main is open, will be held otherwise until main is open again.
+Development should be done in a feature branch, branched off of main.
+Send a PR(steps below) to get it into main (2x LGTM applies).
+PR will only be merged when main is open, will be held otherwise until main is open again.
No back porting / cherry-picking features to existing branches!
PendingReleaseNotes file
@@ -33,7 +33,7 @@ When adding information to the PendingReleaseNotes file make sure that you write
Updating the PendingReleaseNotes file is preferably a part of the original Pull Request, but that is up to the developers' discretion.
-Fork the code
+Fork the code
-------------
In your browser, navigate to: [https://github.com/apache/cloudstack](https://github.com/apache/cloudstack)
@@ -136,4 +136,4 @@ $ git push origin :feature_x
Release Principles
------------------
-Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up
+Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up
diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md
index 8293a22973a2..e02cc6518535 100644
--- a/PULL_REQUEST_TEMPLATE.md
+++ b/PULL_REQUEST_TEMPLATE.md
@@ -23,6 +23,7 @@ This PR...
- [ ] Enhancement (improves an existing feature and functionality)
- [ ] Cleanup (Code refactoring and cleanup, that may add test cases)
- [ ] build/CI
+- [ ] test (unit or integration test code)
### Feature/Enhancement Scale or Bug Severity
diff --git a/README.md b/README.md
index e193913612f1..f66a4dc6f975 100644
--- a/README.md
+++ b/README.md
@@ -142,7 +142,7 @@ This distribution includes cryptographic software. The country in which you curr
reside may have restrictions on the import, possession, use, and/or re-export to another
country, of encryption software. BEFORE using any encryption software, please check your
country's laws, regulations and policies concerning the import, possession, or use, and
-re-export of encryption software, to see if this is permitted. See [The Wassenaar Arrangement](http://www.wassenaar.org/)
+re-export of encryption software, to see if this is permitted. See [The Wassenaar Arrangement](http://www.wassenaar.org/)
for more information.
The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS), has
diff --git a/agent/bindir/cloud-setup-agent.in b/agent/bindir/cloud-setup-agent.in
index 53c6c2f56aa4..18de64089ed0 100755
--- a/agent/bindir/cloud-setup-agent.in
+++ b/agent/bindir/cloud-setup-agent.in
@@ -6,9 +6,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
diff --git a/agent/bindir/cloud-ssh.in b/agent/bindir/cloud-ssh.in
index e4b3c141a975..a5ea975c2f3a 100644
--- a/agent/bindir/cloud-ssh.in
+++ b/agent/bindir/cloud-ssh.in
@@ -6,9 +6,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties
index e600e8f8f201..3b6a7b7de292 100644
--- a/agent/conf/agent.properties
+++ b/agent/conf/agent.properties
@@ -430,3 +430,6 @@ iscsi.session.cleanup.enabled=false
# If set to "true", the agent will register for libvirt domain events, allowing for immediate updates on crashed or
# unexpectedly stopped. Experimental, requires agent restart.
# libvirt.events.enabled=false
+
+# Implicit host tags managed by agent.properties
+# host.tags=
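Note: the commented-out key above is a placeholder; an operator who wants the agent to report implicit host tags would uncomment it and supply a comma-separated list, for example a purely illustrative "host.tags=ssd,gpu".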
diff --git a/agent/conf/cloudstack-agent.logrotate.in b/agent/conf/cloudstack-agent.logrotate.in
index 2b3dc87f2532..9f22b4bab868 100644
--- a/agent/conf/cloudstack-agent.logrotate.in
+++ b/agent/conf/cloudstack-agent.logrotate.in
@@ -15,11 +15,13 @@
# specific language governing permissions and limitations
# under the License.
-/var/log/cloudstack/agent/security_group.log /var/log/cloudstack/agent/resizevolume.log /var/log/cloudstack/agent/rolling-maintenance.log {
+/var/log/cloudstack/agent/security_group.log /var/log/cloudstack/agent/resizevolume.log /var/log/cloudstack/agent/rolling-maintenance.log /var/log/cloudstack/agent/agent.out /var/log/cloudstack/agent/agent.err {
copytruncate
daily
rotate 5
compress
missingok
size 10M
+ dateext
+ dateformat -%Y-%m-%d
}
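Note: with dateext and the dateformat above, rotated files are suffixed with the rotation date (for example agent.out-2024-05-01, then .gz once compressed) instead of the default numeric suffixes; the date shown here is only illustrative.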
diff --git a/agent/conf/environment.properties.in b/agent/conf/environment.properties.in
index 514161a13fc5..b6cc5bbd9879 100644
--- a/agent/conf/environment.properties.in
+++ b/agent/conf/environment.properties.in
@@ -5,9 +5,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
diff --git a/agent/conf/log4j-cloud.xml.in b/agent/conf/log4j-cloud.xml.in
index 44ebd1358af6..29c1d5ee6415 100644
--- a/agent/conf/log4j-cloud.xml.in
+++ b/agent/conf/log4j-cloud.xml.in
@@ -38,7 +38,7 @@ under the License.
-
+
diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java
index 56732dad9936..d2d4f165979d 100644
--- a/agent/src/main/java/com/cloud/agent/Agent.java
+++ b/agent/src/main/java/com/cloud/agent/Agent.java
@@ -1127,6 +1127,12 @@ public void doTask(final Task task) throws TaskExecutionException {
logger.error("Error parsing task", e);
}
} else if (task.getType() == Task.Type.DISCONNECT) {
+ try {
+ // an issue has been found when reconnecting immediately after disconnecting; please refer to https://github.com/apache/cloudstack/issues/8517

+ // wait 5 seconds before reconnecting
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ }
reconnect(task.getLink());
return;
} else if (task.getType() == Task.Type.OTHER) {
diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java
index 24a09ae2ac11..8f97edc39357 100644
--- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java
+++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java
@@ -751,7 +751,7 @@ public Property<Integer> getWorkers() {
public static final Property<Integer> IOTHREADS = new Property<>("iothreads", 1);
/**
- * Enable verbose mode for virt-v2v Instance Conversion from Vmware to KVM
+ * Enable verbose mode for virt-v2v Instance Conversion from VMware to KVM
* Data type: Boolean.
* Default value: false
*/
@@ -803,6 +803,13 @@ public Property<Integer> getWorkers() {
*/
public static final Property<String> KEYSTORE_PASSPHRASE = new Property<>(KeyStoreUtils.KS_PASSPHRASE_PROPERTY, null, String.class);
+ /**
+ * Implicit host tags
+ * Data type: String.
+ * Default value: null
+ */
+ public static final Property<String> HOST_TAGS = new Property<>("host.tags", null, String.class);
+
public static class Property<T> {
private String name;
private T defaultValue;
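Note: a minimal sketch of how the new property could be read on the agent side, assuming the existing AgentPropertiesFileHandler helper keeps its getPropertyValue(Property) signature; the surrounding class is hypothetical and only illustrates the lookup.

import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;

public class HostTagsLookupSketch {
    public static void main(String[] args) {
        // Reads "host.tags" from agent.properties; falls back to the property default (null here) when the key is absent.
        String hostTags = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HOST_TAGS);
        System.out.println("Implicit host tags: " + hostTags);
    }
}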
diff --git a/agent/src/test/java/com/cloud/agent/AgentShellTest.java b/agent/src/test/java/com/cloud/agent/AgentShellTest.java
index f7151779f585..4126692546f2 100644
--- a/agent/src/test/java/com/cloud/agent/AgentShellTest.java
+++ b/agent/src/test/java/com/cloud/agent/AgentShellTest.java
@@ -350,4 +350,16 @@ public void setHostTestValueIsNullPropertyDoesNotStartAndEndWithAtSignSetHosts()
Mockito.verify(agentShellSpy).setHosts(expected);
}
+
+ @Test
+ public void updateAndGetConnectedHost() {
+ String expected = "test";
+
+ AgentShell shell = new AgentShell();
+ shell.setHosts("test");
+ shell.getNextHost();
+ shell.updateConnectedHost();
+
+ Assert.assertEquals(expected, shell.getConnectedHost());
+ }
}
diff --git a/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java b/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java
index 6e7aa8b21e28..d86eb2a3a7f7 100644
--- a/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java
+++ b/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java
@@ -18,40 +18,39 @@
*/
package com.cloud.agent.api.to;
+import java.io.Serializable;
+
import com.cloud.agent.api.LogLevel;
import com.cloud.hypervisor.Hypervisor;
-import java.io.Serializable;
-
public class RemoteInstanceTO implements Serializable {
private Hypervisor.HypervisorType hypervisorType;
- private String hostName;
private String instanceName;
- // Vmware Remote Instances parameters
+ // VMware Remote Instances parameters (required for exporting OVA through ovftool)
// TODO: cloud.agent.transport.Request#getCommands() cannot handle gsoc decode for polymorphic classes
private String vcenterUsername;
@LogLevel(LogLevel.Log4jLevel.Off)
private String vcenterPassword;
private String vcenterHost;
private String datacenterName;
- private String clusterName;
public RemoteInstanceTO() {
}
- public RemoteInstanceTO(String hostName, String instanceName, String vcenterHost,
- String datacenterName, String clusterName,
- String vcenterUsername, String vcenterPassword) {
+ public RemoteInstanceTO(String instanceName) {
+ this.hypervisorType = Hypervisor.HypervisorType.VMware;
+ this.instanceName = instanceName;
+ }
+
+ public RemoteInstanceTO(String instanceName, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) {
this.hypervisorType = Hypervisor.HypervisorType.VMware;
- this.hostName = hostName;
this.instanceName = instanceName;
this.vcenterHost = vcenterHost;
- this.datacenterName = datacenterName;
- this.clusterName = clusterName;
this.vcenterUsername = vcenterUsername;
this.vcenterPassword = vcenterPassword;
+ this.datacenterName = datacenterName;
}
public Hypervisor.HypervisorType getHypervisorType() {
@@ -62,10 +61,6 @@ public String getInstanceName() {
return this.instanceName;
}
- public String getHostName() {
- return this.hostName;
- }
-
public String getVcenterUsername() {
return vcenterUsername;
}
@@ -81,8 +76,4 @@ public String getVcenterHost() {
public String getDatacenterName() {
return datacenterName;
}
-
- public String getClusterName() {
- return clusterName;
- }
}
diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java
index 01ad12a71e08..d4235cbc9bcb 100644
--- a/api/src/main/java/com/cloud/event/EventTypes.java
+++ b/api/src/main/java/com/cloud/event/EventTypes.java
@@ -29,9 +29,9 @@
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.ha.HAConfig;
+import org.apache.cloudstack.quota.QuotaTariff;
import org.apache.cloudstack.storage.object.Bucket;
import org.apache.cloudstack.storage.object.ObjectStore;
-import org.apache.cloudstack.quota.QuotaTariff;
import org.apache.cloudstack.usage.Usage;
import org.apache.cloudstack.vm.schedule.VMSchedule;
@@ -451,6 +451,7 @@ public class EventTypes {
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
+ public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE";
// VPN
public static final String EVENT_REMOTE_ACCESS_VPN_CREATE = "VPN.REMOTE.ACCESS.CREATE";
@@ -721,6 +722,8 @@ public class EventTypes {
// SystemVM
public static final String EVENT_LIVE_PATCH_SYSTEMVM = "LIVE.PATCH.SYSTEM.VM";
+ //Purge resources
+ public static final String EVENT_PURGE_EXPUNGED_RESOURCES = "PURGE.EXPUNGED.RESOURCES";
// OBJECT STORE
public static final String EVENT_OBJECT_STORE_CREATE = "OBJECT.STORE.CREATE";
@@ -1000,6 +1003,7 @@ public class EventTypes {
// Primary storage pool
entityEventDetails.put(EVENT_ENABLE_PRIMARY_STORAGE, StoragePool.class);
entityEventDetails.put(EVENT_DISABLE_PRIMARY_STORAGE, StoragePool.class);
+ entityEventDetails.put(EVENT_CHANGE_STORAGE_POOL_SCOPE, StoragePool.class);
// VPN
entityEventDetails.put(EVENT_REMOTE_ACCESS_VPN_CREATE, RemoteAccessVpn.class);
@@ -1229,4 +1233,8 @@ public static Class<?> getEntityClassForEvent(String eventName) {
public static boolean isVpcEvent(String eventType) {
return EventTypes.EVENT_VPC_CREATE.equals(eventType) || EventTypes.EVENT_VPC_DELETE.equals(eventType);
}
+
+ public static void addEntityEventDetail(String event, Class<?> clazz) {
+ entityEventDetails.put(event, clazz);
+ }
}
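Note: a minimal sketch of how a plugin could use the new addEntityEventDetail() hook; the event name and entity class below are hypothetical and only illustrate the registration.

import com.cloud.event.EventTypes;

public class PluginEventRegistrationSketch {
    public static final String EVENT_MY_ENTITY_CREATE = "MYPLUGIN.ENTITY.CREATE";

    static class MyPluginEntity {
    }

    public static void register() {
        // After this call, EventTypes.getEntityClassForEvent(EVENT_MY_ENTITY_CREATE) resolves to MyPluginEntity.
        EventTypes.addEntityEventDetail(EVENT_MY_ENTITY_CREATE, MyPluginEntity.class);
    }
}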
diff --git a/api/src/main/java/com/cloud/host/Host.java b/api/src/main/java/com/cloud/host/Host.java
index 7563bc3b7426..4a3b914364f8 100644
--- a/api/src/main/java/com/cloud/host/Host.java
+++ b/api/src/main/java/com/cloud/host/Host.java
@@ -54,6 +54,7 @@ public static String[] toStrings(Host.Type... types) {
}
public static final String HOST_UEFI_ENABLE = "host.uefi.enable";
public static final String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
+ public static final String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
/**
* @return name of the machine.
diff --git a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java
index 3c7dbac6442c..0c821b4e36c0 100644
--- a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java
+++ b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java
@@ -23,6 +23,7 @@
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@@ -101,21 +102,20 @@ boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backu
* Will generate commands to migrate a vm to a pool. For now this will only work for stopped VMs on Vmware.
*
* @param vm the stopped vm to migrate
- * @param destination the primary storage pool to migrate to
+ * @param volumeToPool the primary storage pools to migrate to
* @return a list of commands to perform for a successful migration
*/
List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool);
/**
- * Will perform a clone of a VM on an external host (if the guru can handle)
+ * Will return the hypervisor VM (clone VM for PowerOn VMs), performs a clone of a VM if required on an external host (if the guru can handle)
* @param hostIp VM's source host IP
- * @param vmName name of the source VM to clone from
+ * @param vmName name of the source VM (clone VM name if cloned)
* @param params hypervisor specific additional parameters
- * @return a reference to the cloned VM
+ * @return a reference to the hypervisor or cloned VM, and cloned flag
*/
- UnmanagedInstanceTO cloneHypervisorVMOutOfBand(String hostIp, String vmName,
- Map<String, String> params);
+ Pair<UnmanagedInstanceTO, Boolean> getHypervisorVMOutOfBandAndCloneIfRequired(String hostIp, String vmName, Map<String, String> params);
/**
* Removes a VM created as a clone of a VM on an external host
@@ -124,6 +124,23 @@ UnmanagedInstanceTO cloneHypervisorVMOutOfBand(String hostIp, String vmName,
* @param params hypervisor specific additional parameters
* @return true if the operation succeeds, false if not
*/
- boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName,
- Map<String, String> params);
+ boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName, Map<String, String> params);
+
+ /**
+ * Create an OVA/OVF template of a VM on an external host (if the guru can handle)
+ * @param hostIp VM's source host IP
+ * @param vmName name of the source VM to create template from
+ * @param params hypervisor specific additional parameters
+ * @param templateLocation datastore to create the template file
+ * @return the created template dir/name
+ */
+ String createVMTemplateOutOfBand(String hostIp, String vmName, Map<String, String> params, DataStoreTO templateLocation, int threadsCountToExportOvf);
+
+ /**
+ * Removes the template on the location
+ * @param templateLocation datastore to remove the template file
+ * @param templateDir the template dir to remove from datastore
+ * @return true if the operation succeeds, false if not
+ */
+ boolean removeVMTemplateOutOfBand(DataStoreTO templateLocation, String templateDir);
}
diff --git a/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterHelper.java b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java
similarity index 88%
rename from api/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterHelper.java
rename to api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java
index e160227749db..a13c1b3a6a89 100644
--- a/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterHelper.java
+++ b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java
@@ -16,11 +16,14 @@
// under the License.
package com.cloud.kubernetes.cluster;
-import com.cloud.utils.component.Adapter;
import org.apache.cloudstack.acl.ControlledEntity;
-public interface KubernetesClusterHelper extends Adapter {
+import com.cloud.uservm.UserVm;
+import com.cloud.utils.component.Adapter;
+
+public interface KubernetesServiceHelper extends Adapter {
ControlledEntity findByUuid(String uuid);
ControlledEntity findByVmId(long vmId);
+ void checkVmCanBeDestroyed(UserVm userVm);
}
diff --git a/api/src/main/java/com/cloud/network/NetworkModel.java b/api/src/main/java/com/cloud/network/NetworkModel.java
index 53ac735cf050..699dcbf6c508 100644
--- a/api/src/main/java/com/cloud/network/NetworkModel.java
+++ b/api/src/main/java/com/cloud/network/NetworkModel.java
@@ -317,6 +317,8 @@ public interface NetworkModel {
void checkIp6Parameters(String startIPv6, String endIPv6, String ip6Gateway, String ip6Cidr) throws InvalidParameterValueException;
+ void checkIp6CidrSizeEqualTo64(String ip6Cidr) throws InvalidParameterValueException;
+
void checkRequestedIpAddresses(long networkId, IpAddresses ips) throws InvalidParameterValueException;
String getStartIpv6Address(long id);
diff --git a/api/src/main/java/com/cloud/network/NetworkService.java b/api/src/main/java/com/cloud/network/NetworkService.java
index 51799e25cda6..b8dd464b3655 100644
--- a/api/src/main/java/com/cloud/network/NetworkService.java
+++ b/api/src/main/java/com/cloud/network/NetworkService.java
@@ -20,6 +20,7 @@
import java.util.Map;
import com.cloud.dc.DataCenter;
+import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.api.command.admin.address.ReleasePodIpCmdByAdmin;
import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd;
import org.apache.cloudstack.api.command.admin.network.ListDedicatedGuestVlanRangesCmd;
@@ -102,6 +103,10 @@ IpAddress allocatePortableIP(Account ipOwner, int regionId, Long zoneId, Long ne
Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException;
+ Network createGuestNetwork(long networkOfferingId, String name, String displayText, Account owner,
+ PhysicalNetwork physicalNetwork, long zoneId, ControlledEntity.ACLType aclType) throws
+ InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException;
+
Pair<List<? extends Network>, Integer> searchForNetworks(ListNetworksCmd cmd);
boolean deleteNetwork(long networkId, boolean forced);
diff --git a/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java b/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java
index c47500c78495..cb92739d2837 100644
--- a/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java
+++ b/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java
@@ -17,17 +17,22 @@
package com.cloud.network;
import java.util.List;
+import java.util.Map;
import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd;
import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd;
+import com.cloud.deploy.DeploymentPlanner;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.router.VirtualRouter;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
import com.cloud.vm.Nic;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
public interface VirtualNetworkApplianceService {
/**
@@ -62,6 +67,10 @@ public interface VirtualNetworkApplianceService {
VirtualRouter startRouter(long id) throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException;
+ void startRouterForHA(VirtualMachine vm, Map<VirtualMachineProfile.Param, Object> params, DeploymentPlanner planner)
+ throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException,
+ OperationTimedoutException;
+
VirtualRouter destroyRouter(long routerId, Account caller, Long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException;
VirtualRouter findRouter(long routerId);
diff --git a/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java b/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java
index 5c3ee3f1032a..cd04db802cac 100644
--- a/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java
+++ b/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java
@@ -29,7 +29,6 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian
/**
* @param router
* @param network
- * @param isRedundant
* @param params TODO
* @return
* @throws ConcurrentOperationException
@@ -42,11 +41,30 @@ boolean addVpcRouterToGuestNetwork(VirtualRouter router, Network network, Map updateHealthChecks(Network network, List lbrules);
boolean handlesOnlyRulesInTransitionState();
+
+ default void expungeLbVmRefs(List<Long> vmIds, Long batchSize) {
+ }
}
diff --git a/api/src/main/java/com/cloud/network/guru/NetworkGuru.java b/api/src/main/java/com/cloud/network/guru/NetworkGuru.java
index cbadbb18a8f1..7b81c75ed845 100644
--- a/api/src/main/java/com/cloud/network/guru/NetworkGuru.java
+++ b/api/src/main/java/com/cloud/network/guru/NetworkGuru.java
@@ -212,4 +212,7 @@ void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDe
boolean isMyTrafficType(TrafficType type);
+ default boolean isSlaacV6Only() {
+ return true;
+ }
}
diff --git a/api/src/main/java/com/cloud/network/vpc/VpcService.java b/api/src/main/java/com/cloud/network/vpc/VpcService.java
index 2cdc034a16e1..0f0d29f4082c 100644
--- a/api/src/main/java/com/cloud/network/vpc/VpcService.java
+++ b/api/src/main/java/com/cloud/network/vpc/VpcService.java
@@ -132,6 +132,8 @@ Pair<List<? extends Vpc>, Integer> listVpcs(Long id, String vpcName, String disp
*/
boolean startVpc(long vpcId, boolean destroyOnFailure) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
+ void startVpc(CreateVPCCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
+
/**
* Shuts down the VPC which includes shutting down all VPC provider and rules cleanup on the backend
*
diff --git a/api/src/main/java/com/cloud/offering/ServiceOffering.java b/api/src/main/java/com/cloud/offering/ServiceOffering.java
index 58c7b0dbaf96..acb7a9f1cf91 100644
--- a/api/src/main/java/com/cloud/offering/ServiceOffering.java
+++ b/api/src/main/java/com/cloud/offering/ServiceOffering.java
@@ -33,6 +33,9 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity,
static final String internalLbVmDefaultOffUniqueName = "Cloud.Com-InternalLBVm";
// leaving cloud.com references as these are identifyers and no real world addresses (check against DB)
+
+ static final String PURGE_DB_ENTITIES_KEY = "purge.db.entities";
+
enum State {
Inactive, Active,
}
diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java
index c3609cfd8eea..1ce335b01153 100644
--- a/api/src/main/java/com/cloud/storage/StorageService.java
+++ b/api/src/main/java/com/cloud/storage/StorageService.java
@@ -21,6 +21,7 @@
import java.util.Map;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
+import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@@ -29,11 +30,13 @@
import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
+import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceInUseException;
import com.cloud.exception.ResourceUnavailableException;
import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd;
@@ -110,6 +113,8 @@ public interface StorageService {
*/
ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException;
+ ImageStore updateImageStore(UpdateImageStoreCmd cmd);
+
ImageStore updateImageStoreStatus(Long id, Boolean readonly);
void updateStorageCapabilities(Long poolId, boolean failOnChecks);
@@ -127,4 +132,6 @@ public interface StorageService {
boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd);
ObjectStore updateObjectStore(Long id, UpdateObjectStoragePoolCmd cmd);
+
+ void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException;
}
diff --git a/api/src/main/java/com/cloud/user/ResourceLimitService.java b/api/src/main/java/com/cloud/user/ResourceLimitService.java
index d0aa9f69f840..3b30b8fc4a57 100644
--- a/api/src/main/java/com/cloud/user/ResourceLimitService.java
+++ b/api/src/main/java/com/cloud/user/ResourceLimitService.java
@@ -38,7 +38,10 @@ public interface ResourceLimitService {
static final ConfigKey<Long> MaxProjectSecondaryStorage = new ConfigKey<>("Project Defaults", Long.class, "max.project.secondary.storage", "400",
"The default maximum secondary storage space (in GiB) that can be used for a project", false);
static final ConfigKey<Long> ResourceCountCheckInterval = new ConfigKey<>("Advanced", Long.class, "resourcecount.check.interval", "300",
- "Time (in seconds) to wait before running resource recalculation and fixing task. Default is 300 seconds, Setting this to 0 disables execution of the task", true);
+ "Time (in seconds) to wait before running resource recalculation and fixing tasks like stale resource reservation cleanup" +
+ ". Default is 300 seconds, Setting this to 0 disables execution of the task", true);
static final ConfigKey<Long> ResourceReservationCleanupDelay = new ConfigKey<>("Advanced", Long.class, "resource.reservation.cleanup.delay", "3600",
"Time (in seconds) after which a resource reservation gets deleted. Default is 3600 seconds, Setting this to 0 disables execution of the task", true);
static final ConfigKey<String> ResourceLimitHostTags = new ConfigKey<>("Advanced", String.class, "resource.limit.host.tags", "",
"A comma-separated list of tags for host resource limits", true);
static final ConfigKey<String> ResourceLimitStorageTags = new ConfigKey<>("Advanced", String.class, "resource.limit.storage.tags", "",
@@ -243,6 +246,8 @@ public interface ResourceLimitService {
void checkVolumeResourceLimitForDiskOfferingChange(Account owner, Boolean display, Long currentSize, Long newSize,
DiskOffering currentOffering, DiskOffering newOffering) throws ResourceAllocationException;
+ void checkPrimaryStorageResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering) throws ResourceAllocationException;
+
void incrementVolumeResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
void decrementVolumeResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
diff --git a/api/src/main/java/com/cloud/uservm/UserVm.java b/api/src/main/java/com/cloud/uservm/UserVm.java
index e30f5e030544..9035d2903c9a 100644
--- a/api/src/main/java/com/cloud/uservm/UserVm.java
+++ b/api/src/main/java/com/cloud/uservm/UserVm.java
@@ -48,4 +48,6 @@ public interface UserVm extends VirtualMachine, ControlledEntity {
void setAccountId(long accountId);
public boolean isDisplayVm();
+
+ String getUserVmType();
}
diff --git a/api/src/main/java/com/cloud/vm/NicProfile.java b/api/src/main/java/com/cloud/vm/NicProfile.java
index d3c1daa1f5da..183c8dcb2d59 100644
--- a/api/src/main/java/com/cloud/vm/NicProfile.java
+++ b/api/src/main/java/com/cloud/vm/NicProfile.java
@@ -62,6 +62,7 @@ public class NicProfile implements InternalIdentity, Serializable {
String iPv4Dns1;
String iPv4Dns2;
String requestedIPv4;
+ boolean ipv4AllocationRaceCheck;
// IPv6
String iPv6Address;
@@ -405,6 +406,13 @@ public void setMtu(Integer mtu) {
this.mtu = mtu;
}
+ public boolean getIpv4AllocationRaceCheck() {
+ return this.ipv4AllocationRaceCheck;
+ }
+
+ public void setIpv4AllocationRaceCheck(boolean ipv4AllocationRaceCheck) {
+ this.ipv4AllocationRaceCheck = ipv4AllocationRaceCheck;
+ }
//
// OTHER METHODS
diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java
index 9d8b196a4ff5..5a9301eef7fa 100644
--- a/api/src/main/java/com/cloud/vm/UserVmService.java
+++ b/api/src/main/java/com/cloud/vm/UserVmService.java
@@ -42,9 +42,11 @@
import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd;
import com.cloud.dc.DataCenter;
+import com.cloud.deploy.DeploymentPlanner;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.StorageUnavailableException;
@@ -66,10 +68,7 @@ public interface UserVmService {
/**
* Destroys one virtual machine
*
- * @param userId
- * the id of the user performing the action
- * @param vmId
- * the id of the virtual machine.
+ * @param cmd the API Command Object containing the parameters to use for this service action
* @throws ConcurrentOperationException
* @throws ResourceUnavailableException
*/
@@ -112,6 +111,12 @@ UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, E
UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ResourceAllocationException;
+ void startVirtualMachine(UserVm vm) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException;
+
+ void startVirtualMachineForHA(VirtualMachine vm, Map<VirtualMachineProfile.Param, Object> params,
+ DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException,
+ ConcurrentOperationException, OperationTimedoutException;
+
UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException;
/**
@@ -148,14 +153,6 @@ UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, E
* Creates a Basic Zone User VM in the database and returns the VM to the
* caller.
*
- *
- *
- * @param sshKeyPair
- * - name of the ssh key pair used to login to the virtual
- * machine
- * @param cpuSpeed
- * @param memory
- * @param cpuNumber
* @param zone
* - availability zone for the virtual machine
* @param serviceOffering
@@ -231,9 +228,6 @@ UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering s
* Creates a User VM in Advanced Zone (Security Group feature is enabled) in
* the database and returns the VM to the caller.
*
- *
- *
- * @param type
* @param zone
* - availability zone for the virtual machine
* @param serviceOffering
@@ -309,14 +303,6 @@ UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOfferin
* Creates a User VM in Advanced Zone (Security Group feature is disabled)
* in the database and returns the VM to the caller.
*
- *
- *
- * @param sshKeyPair
- * - name of the ssh key pair used to login to the virtual
- * machine
- * @param cpuSpeed
- * @param memory
- * @param cpuNumber
* @param zone
* - availability zone for the virtual machine
* @param serviceOffering
diff --git a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java
index f2ff3da8449e..c67ee4eabc28 100644
--- a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java
+++ b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java
@@ -192,6 +192,10 @@ public boolean equals(Object obj) {
Map<Param, Object> getParameters();
+ void setCpuOvercommitRatio(Float cpuOvercommitRatio);
+
+ void setMemoryOvercommitRatio(Float memoryOvercommitRatio);
+
Float getCpuOvercommitRatio();
Float getMemoryOvercommitRatio();
diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java b/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java
index aafc039b36b5..938936765167 100644
--- a/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java
+++ b/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java
@@ -17,7 +17,9 @@
package org.apache.cloudstack.api;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import org.apache.cloudstack.region.PortableIp;
import org.apache.commons.collections.CollectionUtils;
@@ -81,15 +83,22 @@ public enum ApiCommandResourceType {
ManagementServer(org.apache.cloudstack.management.ManagementServerHost.class),
ObjectStore(org.apache.cloudstack.storage.object.ObjectStore.class),
Bucket(org.apache.cloudstack.storage.object.Bucket.class),
- QuotaTariff(org.apache.cloudstack.quota.QuotaTariff.class);
+ QuotaTariff(org.apache.cloudstack.quota.QuotaTariff.class),
+ KubernetesCluster(null),
+ KubernetesSupportedVersion(null);
private final Class<?> clazz;
+ static final Map<ApiCommandResourceType, Class<?>> additionalClassMappings = new HashMap<>();
+
private ApiCommandResourceType(Class<?> clazz) {
this.clazz = clazz;
}
public Class<?> getAssociatedClass() {
+ if (this.clazz == null && additionalClassMappings.containsKey(this)) {
+ return additionalClassMappings.get(this);
+ }
return this.clazz;
}
@@ -119,4 +128,8 @@ public static ApiCommandResourceType fromString(String value) {
}
return null;
}
+
+ public static void setClassMapping(ApiCommandResourceType type, Class<?> clazz) {
+ additionalClassMappings.put(type, clazz);
+ }
}
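Note: a minimal sketch of how the null-initialized enum entries are expected to be wired up at runtime through the new setClassMapping(); the VO class below is hypothetical.

import org.apache.cloudstack.api.ApiCommandResourceType;

public class KubernetesTypeMappingSketch {
    static class KubernetesClusterVO {
    }

    public static void register() {
        // getAssociatedClass() falls back to additionalClassMappings when the enum's own class is null.
        ApiCommandResourceType.setClassMapping(ApiCommandResourceType.KubernetesCluster, KubernetesClusterVO.class);
    }
}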
diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
index 6dfcf6561244..6db1ed06eff5 100644
--- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
@@ -175,6 +175,8 @@ public class ApiConstants {
public static final String END_IPV6 = "endipv6";
public static final String END_PORT = "endport";
public static final String ENTRY_TIME = "entrytime";
+ public static final String EVENT_ID = "eventid";
+ public static final String EVENT_TYPE = "eventtype";
public static final String EXPIRES = "expires";
public static final String EXTRA_CONFIG = "extraconfig";
public static final String EXTRA_DHCP_OPTION = "extradhcpoption";
@@ -189,6 +191,7 @@ public class ApiConstants {
public static final String FORCED = "forced";
public static final String FORCED_DESTROY_LOCAL_STORAGE = "forcedestroylocalstorage";
public static final String FORCE_DELETE_HOST = "forcedeletehost";
+ public static final String FORCE_MS_TO_IMPORT_VM_FILES = "forcemstoimportvmfiles";
public static final String FORMAT = "format";
public static final String FOR_VIRTUAL_NETWORK = "forvirtualnetwork";
public static final String FOR_SYSTEM_VMS = "forsystemvms";
@@ -209,6 +212,7 @@ public class ApiConstants {
public static final String HA_PROVIDER = "haprovider";
public static final String HA_STATE = "hastate";
public static final String HEALTH = "health";
+ public static final String HEADERS = "headers";
public static final String HIDE_IP_ADDRESS_USAGE = "hideipaddressusage";
public static final String HOST_ID = "hostid";
public static final String HOST_IDS = "hostids";
@@ -236,6 +240,7 @@ public class ApiConstants {
public static final String NEXT_ACL_RULE_ID = "nextaclruleid";
public static final String MOVE_ACL_CONSISTENCY_HASH = "aclconsistencyhash";
public static final String IMAGE_PATH = "imagepath";
+ public static final String INSTANCE_CONVERSION_SUPPORTED = "instanceconversionsupported";
public static final String INTERNAL_DNS1 = "internaldns1";
public static final String INTERNAL_DNS2 = "internaldns2";
public static final String INTERNET_PROTOCOL = "internetprotocol";
@@ -263,8 +268,10 @@ public class ApiConstants {
public static final String IS_CLEANUP_REQUIRED = "iscleanuprequired";
public static final String IS_DYNAMIC = "isdynamic";
public static final String IS_EDGE = "isedge";
+ public static final String IS_ENCRYPTED = "isencrypted";
public static final String IS_EXTRACTABLE = "isextractable";
public static final String IS_FEATURED = "isfeatured";
+ public static final String IS_IMPLICIT = "isimplicit";
public static final String IS_PORTABLE = "isportable";
public static final String IS_PUBLIC = "ispublic";
public static final String IS_PERSISTENT = "ispersistent";
@@ -280,6 +287,7 @@ public class ApiConstants {
public static final String JOB_STATUS = "jobstatus";
public static final String KEEPALIVE_ENABLED = "keepaliveenabled";
public static final String KERNEL_VERSION = "kernelversion";
+ public static final String KEY = "key";
public static final String LABEL = "label";
public static final String LASTNAME = "lastname";
public static final String LAST_BOOT = "lastboottime";
@@ -354,6 +362,7 @@ public class ApiConstants {
public static final String SSHKEY_ENABLED = "sshkeyenabled";
public static final String PATH = "path";
public static final String PAYLOAD = "payload";
+ public static final String PAYLOAD_URL = "payloadurl";
public static final String POD_ID = "podid";
public static final String POD_NAME = "podname";
public static final String POD_IDS = "podids";
@@ -381,6 +390,7 @@ public class ApiConstants {
public static final String PUBLIC_START_PORT = "publicport";
public static final String PUBLIC_END_PORT = "publicendport";
public static final String PUBLIC_ZONE = "publiczone";
+ public static final String PURGE_RESOURCES = "purgeresources";
public static final String RECEIVED_BYTES = "receivedbytes";
public static final String RECONNECT = "reconnect";
public static final String RECOVER = "recover";
@@ -399,11 +409,9 @@ public class ApiConstants {
public static final String QUERY_FILTER = "queryfilter";
public static final String SCHEDULE = "schedule";
public static final String SCOPE = "scope";
- public static final String SECRET_KEY = "usersecretkey";
- public static final String SECONDARY_IP = "secondaryip";
- public static final String SINCE = "since";
- public static final String KEY = "key";
public static final String SEARCH_BASE = "searchbase";
+ public static final String SECONDARY_IP = "secondaryip";
+ public static final String SECRET_KEY = "secretkey";
public static final String SECURITY_GROUP_IDS = "securitygroupids";
public static final String SECURITY_GROUP_NAMES = "securitygroupnames";
public static final String SECURITY_GROUP_NAME = "securitygroupname";
@@ -421,6 +429,7 @@ public class ApiConstants {
public static final String SHOW_UNIQUE = "showunique";
public static final String SIGNATURE = "signature";
public static final String SIGNATURE_VERSION = "signatureversion";
+ public static final String SINCE = "since";
public static final String SIZE = "size";
public static final String SNAPSHOT = "snapshot";
public static final String SNAPSHOT_ID = "snapshotid";
@@ -428,8 +437,7 @@ public class ApiConstants {
public static final String SNAPSHOT_TYPE = "snapshottype";
public static final String SNAPSHOT_QUIESCEVM = "quiescevm";
public static final String SOURCE_ZONE_ID = "sourcezoneid";
- public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine";
- public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot";
+ public static final String SSL_VERIFICATION = "sslverification";
public static final String START_DATE = "startdate";
public static final String START_ID = "startid";
public static final String START_IP = "startip";
@@ -442,18 +450,23 @@ public class ApiConstants {
public static final String STORAGE_POLICY = "storagepolicy";
public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled";
public static final String STORAGE_CAPABILITIES = "storagecapabilities";
+ public static final String STORAGE_CUSTOM_STATS = "storagecustomstats";
public static final String SUBNET = "subnet";
public static final String OWNER = "owner";
public static final String SWAP_OWNER = "swapowner";
public static final String SYSTEM_VM_TYPE = "systemvmtype";
public static final String TAGS = "tags";
public static final String STORAGE_TAGS = "storagetags";
+ public static final String SUCCESS = "success";
+ public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine";
+ public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot";
public static final String TARGET_IQN = "targetiqn";
public static final String TEMPLATE_FILTER = "templatefilter";
public static final String TEMPLATE_ID = "templateid";
public static final String TEMPLATE_IDS = "templateids";
public static final String TEMPLATE_NAME = "templatename";
public static final String TEMPLATE_TYPE = "templatetype";
+ public static final String TEMPLATE_FORMAT = "templateformat";
public static final String TIMEOUT = "timeout";
public static final String TIMEZONE = "timezone";
public static final String TIMEZONEOFFSET = "timezoneoffset";
@@ -480,6 +493,7 @@ public class ApiConstants {
public static final String USERNAME = "username";
public static final String USER_CONFIGURABLE = "userconfigurable";
public static final String USER_SECURITY_GROUP_LIST = "usersecuritygrouplist";
+ public static final String USER_SECRET_KEY = "usersecretkey";
public static final String USE_VIRTUAL_NETWORK = "usevirtualnetwork";
public static final String UPDATE_IN_SEQUENCE = "updateinsequence";
public static final String VALUE = "value";
@@ -559,6 +573,7 @@ public class ApiConstants {
public static final String ALLOCATION_STATE = "allocationstate";
public static final String MANAGED_STATE = "managedstate";
public static final String MANAGEMENT_SERVER_ID = "managementserverid";
+ public static final String MANAGEMENT_SERVER_NAME = "managementservername";
public static final String STORAGE = "storage";
public static final String STORAGE_ID = "storageid";
public static final String PING_STORAGE_SERVER_IP = "pingstorageserverip";
@@ -898,6 +913,7 @@ public class ApiConstants {
public static final String AUTOSCALE_VMGROUP_NAME = "autoscalevmgroupname";
public static final String BAREMETAL_DISCOVER_NAME = "baremetaldiscovername";
public static final String BAREMETAL_RCT_URL = "baremetalrcturl";
+ public static final String BATCH_SIZE = "batchsize";
public static final String UCS_DN = "ucsdn";
public static final String GSLB_PROVIDER = "gslbprovider";
public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider";
@@ -1119,6 +1135,11 @@ public class ApiConstants {
public static final String PARAMETER_DESCRIPTION_IS_TAG_A_RULE = "Whether the informed tag is a JS interpretable rule or not.";
+ public static final String WEBHOOK_ID = "webhookid";
+ public static final String WEBHOOK_NAME = "webhookname";
+
+ public static final String NFS_MOUNT_OPTIONS = "nfsmountopts";
+
/**
* This enum specifies IO Drivers, each option controls specific policies on I/O.
* Qemu guests support "threads" and "native" options Since 0.8.8 ; "io_uring" is supported Since 6.3.0 (QEMU 5.0).
@@ -1141,6 +1162,14 @@ public String toString() {
}
}
+ public static final String PARAMETER_DESCRIPTION_START_DATE_POSSIBLE_FORMATS = "The recommended format is \"yyyy-MM-dd'T'HH:mm:ssZ\" (e.g.: \"2023-01-01T12:00:00+0100\"); " +
+ "however, the following formats are also accepted: \"yyyy-MM-dd HH:mm:ss\" (e.g.: \"2023-01-01 12:00:00\") and \"yyyy-MM-dd\" (e.g.: \"2023-01-01\" - if the time is not " +
+ "added, it will be interpreted as \"00:00:00\"). If the recommended format is not used, the date will be considered in the server timezone.";
+
+ public static final String PARAMETER_DESCRIPTION_END_DATE_POSSIBLE_FORMATS = "The recommended format is \"yyyy-MM-dd'T'HH:mm:ssZ\" (e.g.: \"2023-01-01T12:00:00+0100\"); " +
+ "however, the following formats are also accepted: \"yyyy-MM-dd HH:mm:ss\" (e.g.: \"2023-01-01 12:00:00\") and \"yyyy-MM-dd\" (e.g.: \"2023-01-01\" - if the time is not " +
+ "added, it will be interpreted as \"23:59:59\"). If the recommended format is not used, the date will be considered in the server timezone.";
+
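As a rough illustration (not part of this patch), client code could produce values in the recommended and fallback formats enumerated by the two description constants above using standard JDK SimpleDateFormat patterns; the class name here is invented for the example:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class DateParamFormats {
        public static void main(String[] args) {
            Date now = new Date();
            // recommended, timezone-aware format, e.g. 2023-01-01T12:00:00+0100
            System.out.println(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ").format(now));
            // fallback formats; per the description these are interpreted in the server timezone
            System.out.println(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(now));
            System.out.println(new SimpleDateFormat("yyyy-MM-dd").format(now));
        }
    }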
public enum BootType {
UEFI, BIOS;
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java
index 50984188bf56..01f7af108416 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java
@@ -43,6 +43,12 @@ public class UpdateHypervisorCapabilitiesCmd extends BaseCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = HypervisorCapabilitiesResponse.class, description = "ID of the hypervisor capability")
private Long id;
+ @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor for which the hypervisor capabilities are to be updated", since = "4.19.1")
+ private String hypervisor;
+
+ @Parameter(name = ApiConstants.HYPERVISOR_VERSION, type = CommandType.STRING, description = "the hypervisor version for which the hypervisor capabilities are to be updated", since = "4.19.1")
+ private String hypervisorVersion;
+
@Parameter(name = ApiConstants.SECURITY_GROUP_EANBLED, type = CommandType.BOOLEAN, description = "set true to enable security group for this hypervisor.")
private Boolean securityGroupEnabled;
@@ -73,6 +79,14 @@ public Long getId() {
return id;
}
+ public String getHypervisor() {
+ return hypervisor;
+ }
+
+ public String getHypervisorVersion() {
+ return hypervisorVersion;
+ }
+
public Long getMaxGuestsLimit() {
return maxGuestsLimit;
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java
index 4562aa7da19e..8f6d5413d72d 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java
@@ -54,7 +54,11 @@ public class CreateServiceOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.CPU_NUMBER, type = CommandType.INTEGER, required = false, description = "the CPU number of the service offering")
private Integer cpuNumber;
- @Parameter(name = ApiConstants.CPU_SPEED, type = CommandType.INTEGER, required = false, description = "the CPU speed of the service offering in MHz.")
+ @Parameter(name = ApiConstants.CPU_SPEED, type = CommandType.INTEGER, required = false, description = "For VMware and Xen based hypervisors this is the CPU speed of the service offering in MHz.\n" +
+ "For the KVM hypervisor," +
+ " the values of the parameters cpuSpeed and cpuNumber will be used to calculate the `shares` value. This value is used by the KVM hypervisor to calculate how much time" +
+ " the VM will have access to the host's CPU. The `shares` value does not have a unit, and its purpose is being a weight value for the host to compare between its guest" +
+ " VMs. For more information, see https://libvirt.org/formatdomain.html#cpu-tuning.")
private Integer cpuSpeed;
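A small sketch of the relationship the description above alludes to; the derivation shown (cpuNumber * cpuSpeed) is an assumption for illustration, not quoted from this patch:

    int cpuNumber = 2;     // vCPUs from the offering
    int cpuSpeed = 1000;   // MHz from the offering
    // assumed derivation of the unitless libvirt weight described above
    int shares = cpuNumber * cpuSpeed;   // e.g. 2000; a higher weight means a larger share of host CPU time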
@Parameter(name = ApiConstants.DISPLAY_TEXT, type = CommandType.STRING, description = "The display text of the service offering, defaults to 'name'.")
@@ -242,6 +246,12 @@ public class CreateServiceOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ENCRYPT_ROOT, type = CommandType.BOOLEAN, description = "VMs using this offering require root volume encryption", since="4.18")
private Boolean encryptRoot;
+ @Parameter(name = ApiConstants.PURGE_RESOURCES, type = CommandType.BOOLEAN,
+ description = "Whether to cleanup instance and its associated resource from database upon expunge of the instance",
+ since="4.20")
+ private Boolean purgeResources;
+
+
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
@@ -269,7 +279,7 @@ public Integer getMemory() {
public String getServiceOfferingName() {
if (StringUtils.isEmpty(serviceOfferingName)) {
- throw new InvalidParameterValueException("Failed to create service offering because offering name has not been spified.");
+ throw new InvalidParameterValueException("Failed to create service offering because offering name has not been specified.");
}
return serviceOfferingName;
}
@@ -477,6 +487,10 @@ public boolean getEncryptRoot() {
return false;
}
+ public boolean isPurgeResources() {
+ return Boolean.TRUE.equals(purgeResources);
+ }
+
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java
index 7d6bae860834..e580f0d9f41a 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java
@@ -89,6 +89,11 @@ public class UpdateServiceOfferingCmd extends BaseCmd {
description = "state of the service offering")
private String serviceOfferingState;
+ @Parameter(name = ApiConstants.PURGE_RESOURCES, type = CommandType.BOOLEAN,
+ description = "Whether to cleanup VM and its associated resource upon expunge",
+ since="4.20")
+ private Boolean purgeResources;
+
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -185,6 +190,10 @@ public State getState() {
return state;
}
+ public boolean isPurgeResources() {
+ return Boolean.TRUE.equals(purgeResources);
+ }
+
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmd.java
new file mode 100644
index 000000000000..b6833f097336
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmd.java
@@ -0,0 +1,131 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.admin.resource;
+
+
+import java.util.Date;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.PurgeExpungedResourcesResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.resource.ResourceCleanupService;
+
+import com.cloud.event.EventTypes;
+
+@APICommand(name = "purgeExpungedResources",
+ description = "Purge expunged resources",
+ responseObject = SuccessResponse.class,
+ responseView = ResponseObject.ResponseView.Full,
+ requestHasSensitiveInfo = false,
+ responseHasSensitiveInfo = false,
+ authorized = {RoleType.Admin},
+ since = "4.20")
+public class PurgeExpungedResourcesCmd extends BaseAsyncCmd {
+
+ @Inject
+ ResourceCleanupService resourceCleanupService;
+
+ /////////////////////////////////////////////////////
+ //////////////// API parameters /////////////////////
+ /////////////////////////////////////////////////////
+
+ @Parameter(name = ApiConstants.RESOURCE_TYPE, type = BaseCmd.CommandType.STRING,
+ description = "The type of the resource which need to be purged. Supported types: " +
+ "VirtualMachine")
+ private String resourceType;
+
+ @Parameter(name = ApiConstants.BATCH_SIZE, type = CommandType.LONG,
+ description = "The size of batch used during purging")
+ private Long batchSize;
+
+ @Parameter(name = ApiConstants.START_DATE,
+ type = CommandType.DATE,
+ description = "The start date range of the expunged resources used for purging " +
+ "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\")")
+ private Date startDate;
+
+ @Parameter(name = ApiConstants.END_DATE,
+ type = CommandType.DATE,
+ description = "The end date range of the expunged resources used for purging " +
+ "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\")")
+ private Date endDate;
+
+ /////////////////////////////////////////////////////
+ /////////////////// Accessors ///////////////////////
+ /////////////////////////////////////////////////////
+
+
+ public String getResourceType() {
+ return resourceType;
+ }
+
+ public Long getBatchSize() {
+ return batchSize;
+ }
+
+ public Date getStartDate() {
+ return startDate;
+ }
+
+ public Date getEndDate() {
+ return endDate;
+ }
+
+ @Override
+ public long getEntityOwnerId() {
+ return CallContext.current().getCallingAccount().getId();
+ }
+
+ @Override
+ public String getEventType() {
+ return EventTypes.EVENT_PURGE_EXPUNGED_RESOURCES;
+ }
+
+ @Override
+ public String getEventDescription() {
+ return "Purging expunged resources";
+ }
+
+ /////////////////////////////////////////////////////
+ /////////////// API Implementation///////////////////
+ /////////////////////////////////////////////////////
+
+ @Override
+ public void execute() {
+ try {
+ long result = resourceCleanupService.purgeExpungedResources(this);
+ PurgeExpungedResourcesResponse response = new PurgeExpungedResourcesResponse();
+ response.setResourceCount(result);
+ response.setObjectName(getCommandName().toLowerCase());
+ setResponseObject(response);
+ } catch (Exception e) {
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getLocalizedMessage());
+ }
+ }
+}
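For orientation only, a hedged sketch of how the new command's parameters could map onto a plain API query string; the parameter keys follow the ApiConstants values referenced above, while the sample values, endpoint path, and request signing are the usual CloudStack API mechanics and are elided or invented here:

    String query = String.join("&",
            "command=purgeExpungedResources",
            "resourcetype=VirtualMachine",   // ApiConstants.RESOURCE_TYPE
            "batchsize=100",                 // ApiConstants.BATCH_SIZE
            "startdate=2024-01-01",          // ApiConstants.START_DATE
            "enddate=2024-06-30");           // ApiConstants.END_DATE
    System.out.println("/client/api?" + query + "&apiKey=...&signature=...");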
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ChangeStoragePoolScopeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ChangeStoragePoolScopeCmd.java
new file mode 100644
index 000000000000..d3b6a0746106
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ChangeStoragePoolScopeCmd.java
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.admin.storage;
+
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiCommandResourceType;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.ClusterResponse;
+import org.apache.cloudstack.api.response.StoragePoolResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+
+import com.cloud.event.EventTypes;
+import com.cloud.storage.StoragePool;
+
+@APICommand(name = "changeStoragePoolScope", description = "Changes the scope of a storage pool when the pool is in Disabled state." +
+ "This feature is officially tested and supported for Hypervisors: KVM and VMware, Protocols: NFS and Ceph, and Storage Provider: DefaultPrimary. " +
+ "There might be extra steps involved to make this work for other hypervisors and storage options.",
+ responseObject = SuccessResponse.class, since= "4.19.1", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
+public class ChangeStoragePoolScopeCmd extends BaseAsyncCmd {
+
+ @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "the Id of the storage pool")
+ private Long id;
+
+ @Parameter(name = ApiConstants.SCOPE, type = CommandType.STRING, required = true, description = "the scope of the storage: cluster or zone")
+ private String scope;
+
+ @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "the Id of the cluster to use if scope is being set to Cluster")
+ private Long clusterId;
+
+ @Override
+ public ApiCommandResourceType getApiResourceType() {
+ return ApiCommandResourceType.StoragePool;
+ }
+
+ @Override
+ public Long getApiResourceId() {
+ return getId();
+ }
+
+ public String getEventType() {
+ return EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE;
+ }
+
+ @Override
+ public String getEventDescription() {
+ String description = "Change storage pool scope. Storage pool Id: ";
+ StoragePool pool = _entityMgr.findById(StoragePool.class, getId());
+ if (pool != null) {
+ description += pool.getUuid();
+ } else {
+ description += getId();
+ }
+ description += " to " + getScope();
+ return description;
+ }
+
+ @Override
+ public void execute() {
+ _storageService.changeStoragePoolScope(this);
+ SuccessResponse response = new SuccessResponse(getCommandName());
+ this.setResponseObject(response);
+ }
+
+ @Override
+ public long getEntityOwnerId() {
+ return CallContext.current().getCallingAccountId();
+ }
+
+ public Long getId() {
+ return id;
+ }
+
+ public String getScope() {
+ return scope;
+ }
+
+ public Long getClusterId() {
+ return clusterId;
+ }
+}
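The command description above implies a couple of argument rules (the scope must be cluster or zone, and a cluster id only makes sense when narrowing to cluster scope); the following is a minimal sketch of that validation under those assumptions, not taken from the actual service implementation:

    static void validateChangeScopeArgs(String scope, Long clusterId) {
        boolean cluster = "cluster".equalsIgnoreCase(scope);
        boolean zone = "zone".equalsIgnoreCase(scope);
        if (!cluster && !zone) {
            throw new IllegalArgumentException("scope must be 'cluster' or 'zone'");
        }
        if (cluster && clusterId == null) {
            throw new IllegalArgumentException("clusterid is required when changing scope to cluster");
        }
    }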
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java
index 293ed3103cbc..57a87939b6bd 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java
@@ -72,7 +72,8 @@ public class ListStoragePoolsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "host ID of the storage pools")
private Long hostId;
-
+ @Parameter(name = ApiConstants.STORAGE_CUSTOM_STATS, type = CommandType.BOOLEAN, description = "If true, lists the custom stats of the storage pool", since = "4.18.1")
+ private Boolean customStats;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -129,6 +130,10 @@ public void setScope(String scope) {
this.scope = scope;
}
+ public Boolean getCustomStats() {
+ return customStats != null && customStats;
+ }
+
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java
index bcc438b957bf..0e1631a46ba2 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java
@@ -39,10 +39,17 @@ public class UpdateImageStoreCmd extends BaseCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID")
private Long id;
- @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " +
- "hence not considering them during storage migration")
+ @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.")
+ private String name;
+
+ @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false,
+ description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration")
private Boolean readonly;
+ @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false,
+ description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.")
+ private Long capacityBytes;
+
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@@ -51,17 +58,25 @@ public Long getId() {
return id;
}
+ public String getName() {
+ return name;
+ }
+
public Boolean getReadonly() {
return readonly;
}
+ public Long getCapacityBytes() {
+ return capacityBytes;
+ }
+
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() {
- ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly());
+ ImageStore result = _storageService.updateImageStore(this);
ImageStoreResponse storeResponse = null;
if (result != null) {
storeResponse = _responseGenerator.createImageStoreResponse(result);
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageRecordsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageRecordsCmd.java
index 3cb148c2af03..9ce1fcb2bc99 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageRecordsCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageRecordsCmd.java
@@ -53,16 +53,12 @@ public class ListUsageRecordsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "List usage records for the specified domain.")
private Long domainId;
- @Parameter(name = ApiConstants.END_DATE,
- type = CommandType.DATE,
- required = true,
- description = "End date range for usage record query (use format \"yyyy-MM-dd\" or the new format \"yyyy-MM-dd HH:mm:ss\", e.g. startDate=2015-01-01 or startdate=2015-01-01 10:30:00).")
+ @Parameter(name = ApiConstants.END_DATE, type = CommandType.DATE, required = true, description = "End date range for usage record query. " +
+ ApiConstants.PARAMETER_DESCRIPTION_END_DATE_POSSIBLE_FORMATS)
private Date endDate;
- @Parameter(name = ApiConstants.START_DATE,
- type = CommandType.DATE,
- required = true,
- description = "Start date range for usage record query (use format \"yyyy-MM-dd\" or the new format \"yyyy-MM-dd HH:mm:ss\", e.g. startDate=2015-01-01 or startdate=2015-01-01 11:00:00).")
+ @Parameter(name = ApiConstants.START_DATE, type = CommandType.DATE, required = true, description = "Start date range for usage record query. " +
+ ApiConstants.PARAMETER_DESCRIPTION_START_DATE_POSSIBLE_FORMATS)
private Date startDate;
@Parameter(name = ApiConstants.ACCOUNT_ID, type = CommandType.UUID, entityType = AccountResponse.class, description = "List usage records for the specified account")
@@ -137,11 +133,11 @@ public void setDomainId(Long domainId) {
}
public void setEndDate(Date endDate) {
- this.endDate = endDate == null ? null : new Date(endDate.getTime());
+ this.endDate = endDate;
}
public void setStartDate(Date startDate) {
- this.startDate = startDate == null ? null : new Date(startDate.getTime());
+ this.startDate = startDate;
}
public void setAccountId(Long accountId) {
@@ -167,8 +163,8 @@ public Boolean isRecursive() {
@Override
public void execute() {
Pair, Integer> usageRecords = _usageService.getUsageRecords(this);
- ListResponse response = new ListResponse();
- List usageResponses = new ArrayList();
+ ListResponse response = new ListResponse<>();
+ List usageResponses = new ArrayList<>();
Map> resourceTagResponseMap = null;
if (usageRecords != null) {
//read the resource tags details for all the resources in usage data and store in Map
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java
index 3f8d386d2669..c9e1e934152d 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java
@@ -66,7 +66,7 @@ public class UpdateUserCmd extends BaseCmd {
@Parameter(name = ApiConstants.CURRENT_PASSWORD, type = CommandType.STRING, description = "Current password that was being used by the user. You must inform the current password when updating the password.", acceptedOnAdminPort = false)
private String currentPassword;
- @Parameter(name = ApiConstants.SECRET_KEY, type = CommandType.STRING, description = "The secret key for the user. Must be specified with userApiKey")
+ @Parameter(name = ApiConstants.USER_SECRET_KEY, type = CommandType.STRING, description = "The secret key for the user. Must be specified with userApiKey")
private String secretKey;
@Parameter(name = ApiConstants.TIMEZONE,
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java
index dd897218a4d3..ae6ceff26c7d 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java
@@ -201,8 +201,8 @@ public Map getNicNetworkList() {
for (Map entry : (Collection
+
+
+
+
diff --git a/core/src/main/resources/META-INF/cloudstack/event/module.properties b/core/src/main/resources/META-INF/cloudstack/event/module.properties
new file mode 100644
index 000000000000..ab1f88e98448
--- /dev/null
+++ b/core/src/main/resources/META-INF/cloudstack/event/module.properties
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name=event
+parent=core
diff --git a/core/src/main/resources/META-INF/cloudstack/event/spring-core-lifecycle-event-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/event/spring-core-lifecycle-event-context-inheritable.xml
new file mode 100644
index 000000000000..63d11c65bacb
--- /dev/null
+++ b/core/src/main/resources/META-INF/cloudstack/event/spring-core-lifecycle-event-context-inheritable.xml
@@ -0,0 +1,31 @@
+
+
+
+
+
+
+
+
diff --git a/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml
index df1a4b5c2298..96a9a634bae8 100644
--- a/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml
+++ b/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml
@@ -25,8 +25,8 @@
>
-
-
+
+
diff --git a/debian/control b/debian/control
index 3508c7b5f754..dab7b254b88c 100644
--- a/debian/control
+++ b/debian/control
@@ -24,7 +24,7 @@ Description: CloudStack server library
Package: cloudstack-agent
Architecture: all
-Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor
+Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor, cpu-checker
Recommends: init-system-helpers
Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
Description: CloudStack agent
diff --git a/debian/rules b/debian/rules
index f8228e61e464..b52803702724 100755
--- a/debian/rules
+++ b/debian/rules
@@ -103,6 +103,8 @@ override_dh_auto_install:
install -m0644 packaging/systemd/$(PACKAGE)-management.service debian/$(PACKAGE)-management/lib/systemd/system/$(PACKAGE)-management.service
install -m0644 packaging/systemd/$(PACKAGE)-management.default $(DESTDIR)/$(SYSCONFDIR)/default/$(PACKAGE)-management
+ install -D -m0644 server/target/conf/cloudstack-management.logrotate $(DESTDIR)/$(SYSCONFDIR)/logrotate.d/cloudstack-management
+
# cloudstack-ui
mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/ui
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-ui
@@ -159,6 +161,8 @@ override_dh_auto_install:
install -m0644 packaging/systemd/$(PACKAGE)-usage.service debian/$(PACKAGE)-usage/lib/systemd/system/$(PACKAGE)-usage.service
install -m0644 packaging/systemd/$(PACKAGE)-usage.default $(DESTDIR)/$(SYSCONFDIR)/default/$(PACKAGE)-usage
+ install -D -m0644 usage/target/transformed/cloudstack-usage.logrotate $(DESTDIR)/$(SYSCONFDIR)/logrotate.d/cloudstack-usage
+
# cloudstack-marvin
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-marvin
cp tools/marvin/dist/Marvin-*.tar.gz $(DESTDIR)/usr/share/$(PACKAGE)-marvin/
diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh
index c6b91e07cec4..ea40e9a55634 100755
--- a/deps/install-non-oss.sh
+++ b/deps/install-non-oss.sh
@@ -8,7 +8,7 @@
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java
index 110592161f96..41bd74f11924 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java
@@ -348,4 +348,6 @@ void implementNetworkElementsAndResources(DeployDestination dest, ReservationCon
Pair importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter datacenter, boolean forced) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException;
void unmanageNics(VirtualMachineProfile vm);
+
+ void expungeLbVmRefs(List vmIds, Long batchSize);
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
index c3525466ce19..7950dda4d68e 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
@@ -22,6 +22,7 @@
import java.util.Map;
import java.util.Set;
+import com.cloud.exception.ResourceAllocationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -126,7 +127,7 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon
void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest);
- void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException;
+ void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException, ResourceAllocationException;
boolean canVmRestartOnAnotherServer(long vmId);
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java
index 091c09d7a4d0..fe052f016065 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java
@@ -50,4 +50,6 @@ public interface DataObject {
void decRefCount();
Long getRefCount();
+
+ String getName();
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
index 2c7d3c602783..d52c656f6dbc 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
@@ -18,6 +18,8 @@
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
+import java.util.Map;
+
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
@@ -86,6 +88,22 @@ default boolean requiresAccessForMigration(DataObject dataObject) {
*/
boolean canProvideStorageStats();
+ /**
+ * intended for managed storage
+ * returns true if the storage can provide its custom stats
+ */
+ default boolean poolProvidesCustomStorageStats() {
+ return false;
+ }
+
+ /**
+ * intended for managed storage
+ * returns the custom stats if the storage can provide them
+ */
+ default Map getCustomStorageStats(StoragePool pool) {
+ return null;
+ }
+
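A hypothetical managed-storage driver opting into these new hooks might look like the sketch below; the Map<String, String> signature, the class name, and the stat keys are assumptions made for illustration:

    import java.util.HashMap;
    import java.util.Map;

    public class ExampleManagedStoreDriver /* implements PrimaryDataStoreDriver */ {
        public boolean poolProvidesCustomStorageStats() {
            return true;
        }

        public Map<String, String> getCustomStorageStats(StoragePool pool) {
            Map<String, String> stats = new HashMap<>();   // keys are backend-specific
            stats.put("rebuildProgressPercent", "87");      // invented sample values
            stats.put("spareCapacityGiB", "512");
            return stats;
        }
    }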
/**
* intended for managed storage
* returns the total capacity and used size in bytes
@@ -110,6 +128,14 @@ default boolean requiresAccessForMigration(DataObject dataObject) {
*/
boolean canHostAccessStoragePool(Host host, StoragePool pool);
+ /**
+ * intended for managed storage
+ * returns true if the host can prepare a storage client to provide access to the storage pool
+ */
+ default boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool) {
+ return false;
+ }
+
/**
* Used by storage pools which want to keep VMs' information
* @return true if additional VM info is needed (intended for storage pools).
@@ -157,4 +183,20 @@ default boolean volumesRequireGrantAccessWhenUsed() {
default boolean zoneWideVolumesAvailableWithoutClusterMotion() {
return false;
}
+
+ /**
+ * This method returns the actual size required on the pool for a volume.
+ *
+ * @param volumeSize
+ * Size of volume to be created on the store
+ * @param templateSize
+ * Size of template, if any, which will be used to create the volume
+ * @param isEncryptionRequired
+ * true if volume is encrypted
+ *
+ * @return the size required on the pool for the volume
+ */
+ default long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) {
+ return volumeSize;
+ }
}
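As an illustrative override of the new default above, a driver that needs headroom for encrypted volumes could do something like the following; the padding amount is an invented placeholder:

    @Override
    public long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) {
        long required = Math.max(volumeSize, templateSize == null ? 0L : templateSize);
        if (isEncryptionRequired) {
            required += 16L * 1024 * 1024;   // hypothetical fixed overhead for an encryption header
        }
        return required;
    }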
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
index fcbc19c28b7b..54f3c63f8d73 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
@@ -20,6 +20,7 @@
import java.util.Map;
+import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.StoragePool;
public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
@@ -29,4 +30,6 @@ public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
void updateStoragePool(StoragePool storagePool, Map details);
void enableStoragePool(DataStore store);
void disableStoragePool(DataStore store);
+ void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType);
+ void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType);
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
index 7c4d56e12b92..682473ec94fc 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
@@ -121,4 +121,6 @@ boolean copyPoliciesBetweenVolumesAndDestroySourceVolumeAfterMigration(ObjectInD
Pair checkAndRepairVolume(VolumeInfo volume);
void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host);
+
+ void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId);
}
diff --git a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java
index ecdb59667c96..3d4e6579f7ca 100644
--- a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java
+++ b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java
@@ -38,8 +38,10 @@ public interface AlertManager extends Manager, AlertService {
public static final ConfigKey AlertSmtpUseStartTLS = new ConfigKey("Advanced", Boolean.class, "alert.smtp.useStartTLS", "false",
"If set to true and if we enable security via alert.smtp.useAuth, this will enable StartTLS to secure the connection.", true);
- public static final ConfigKey AlertSmtpEnabledSecurityProtocols = new ConfigKey("Advanced", String.class, "alert.smtp.enabledSecurityProtocols", "",
- "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2", true);
+ public static final ConfigKey AlertSmtpUseAuth = new ConfigKey<>(ConfigKey.CATEGORY_ALERT, Boolean.class, "alert.smtp.useAuth", "false", "If true, use SMTP authentication when sending emails.", false, ConfigKey.Scope.ManagementServer);
+
+ public static final ConfigKey AlertSmtpEnabledSecurityProtocols = new ConfigKey(ConfigKey.CATEGORY_ADVANCED, String.class, "alert.smtp.enabledSecurityProtocols", "",
+ "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2", true, ConfigKey.Kind.WhitespaceSeparatedListWithOptions, "SSLv2Hello,SSLv3,TLSv1,TLSv1.1,TLSv1.2");
public static final ConfigKey Ipv6SubnetCapacityThreshold = new ConfigKey("Advanced", Double.class,
"zone.virtualnetwork.ipv6subnet.capacity.notificationthreshold",
diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java
index ebbae0b31c28..c877ebbe8d2b 100644
--- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java
+++ b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java
@@ -20,6 +20,7 @@
import java.util.Map;
import java.util.Set;
+import com.cloud.dc.VlanVO;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.impl.ConfigurationSubGroupVO;
@@ -60,9 +61,6 @@ public interface ConfigurationManager {
public static final String MESSAGE_CREATE_VLAN_IP_RANGE_EVENT = "Message.CreateVlanIpRange.Event";
public static final String MESSAGE_DELETE_VLAN_IP_RANGE_EVENT = "Message.DeleteVlanIpRange.Event";
- static final String VM_USERDATA_MAX_LENGTH_STRING = "vm.userdata.max.length";
- static final ConfigKey VM_USERDATA_MAX_LENGTH = new ConfigKey<>("Advanced", Integer.class, VM_USERDATA_MAX_LENGTH_STRING, "32768",
- "Max length of vm userdata after base64 decoding. Default is 32768 and maximum is 1048576", true);
public static final ConfigKey AllowNonRFC1918CompliantIPs = new ConfigKey<>(Boolean.class,
"allow.non.rfc1918.compliant.ips", "Advanced", "false",
"Allows non-compliant RFC 1918 IPs for Shared, Isolated networks and VPCs", true, null);
@@ -189,7 +187,7 @@ DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2,
* @param caller
* @return success/failure
*/
- boolean deleteVlanAndPublicIpRange(long userId, long vlanDbId, Account caller);
+ VlanVO deleteVlanAndPublicIpRange(long userId, long vlanDbId, Account caller);
void checkZoneAccess(Account caller, DataCenter zone);
diff --git a/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java b/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java
index 27f63c8c64b2..51d0846fafbc 100644
--- a/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java
+++ b/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java
@@ -25,15 +25,14 @@
import javax.annotation.PostConstruct;
import javax.inject.Inject;
-import org.apache.commons.collections.MapUtils;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.LogManager;
-import org.springframework.beans.factory.NoSuchBeanDefinitionException;
-
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.events.Event;
import org.apache.cloudstack.framework.events.EventBus;
-import org.apache.cloudstack.framework.events.EventBusException;
+import org.apache.cloudstack.framework.events.EventDistributor;
+import org.apache.commons.collections.MapUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
@@ -50,6 +49,7 @@ public class UsageEventUtils {
protected static Logger LOGGER = LogManager.getLogger(UsageEventUtils.class);
protected static EventBus s_eventBus = null;
protected static ConfigurationDao s_configDao;
+ private static EventDistributor eventDistributor;
@Inject
UsageEventDao usageEventDao;
@@ -207,9 +207,9 @@ private static void publishUsageEvent(String usageEventType, Long accountId, Lon
if( !configValue)
return;
try {
- s_eventBus = ComponentContext.getComponent(EventBus.class);
+ eventDistributor = ComponentContext.getComponent(EventDistributor.class);
} catch (NoSuchBeanDefinitionException nbe) {
- return; // no provider is configured to provide events bus, so just return
+ return; // no provider is configured to provide an event distributor, so just return
}
Account account = s_accountDao.findById(accountId);
@@ -238,11 +238,7 @@ private static void publishUsageEvent(String usageEventType, Long accountId, Lon
event.setDescription(eventDescription);
- try {
- s_eventBus.publish(event);
- } catch (EventBusException e) {
- LOGGER.warn("Failed to publish usage event on the event bus.");
- }
+ eventDistributor.publish(event);
}
static final String Name = "management-server";
diff --git a/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java b/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java
index 72737d0b04d2..ae47b1d76ed8 100644
--- a/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java
+++ b/engine/components-api/src/main/java/com/cloud/ha/HighAvailabilityManager.java
@@ -156,4 +156,5 @@ enum Step {
String getHaTag();
DeploymentPlanner getHAPlanner();
+ int expungeWorkItemsByVmList(List vmIds, Long batchSize);
}
diff --git a/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java b/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java
index 24be76e4d3be..107e177ef579 100644
--- a/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java
+++ b/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java
@@ -25,11 +25,9 @@
import javax.inject.Inject;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.cloudstack.framework.events.EventBus;
-import org.apache.cloudstack.framework.events.EventBusException;
-import org.apache.logging.log4j.Logger;
+import org.apache.cloudstack.framework.events.EventDistributor;
import org.apache.logging.log4j.LogManager;
-import org.springframework.beans.factory.NoSuchBeanDefinitionException;
+import org.apache.logging.log4j.Logger;
import com.cloud.event.EventCategory;
import com.cloud.network.Network.Event;
@@ -43,7 +41,7 @@ public class NetworkStateListener implements StateListener t
return true;
}
- private void pubishOnEventBus(String event, String status, Network vo, State oldState, State newState) {
-
+ private void pubishOnEventBus(String event, String status, Network vo, State oldState, State newState) {
String configKey = "publish.resource.state.events";
String value = _configDao.getValue(configKey);
boolean configValue = Boolean.parseBoolean(value);
if(!configValue)
return;
- try {
- s_eventBus = ComponentContext.getComponent(EventBus.class);
- } catch (NoSuchBeanDefinitionException nbe) {
- return; // no provider is configured to provide events bus, so just return
+ if (eventDistributor == null) {
+ setEventDistributor(ComponentContext.getComponent(EventDistributor.class));
}
String resourceName = getEntityFromClassName(Network.class.getName());
org.apache.cloudstack.framework.events.Event eventMsg =
- new org.apache.cloudstack.framework.events.Event("management-server", EventCategory.RESOURCE_STATE_CHANGE_EVENT.getName(), event, resourceName, vo.getUuid());
- Map eventDescription = new HashMap();
+ new org.apache.cloudstack.framework.events.Event("management-server", EventCategory.RESOURCE_STATE_CHANGE_EVENT.getName(), event, resourceName, vo.getUuid());
+ Map eventDescription = new HashMap<>();
eventDescription.put("resource", resourceName);
eventDescription.put("id", vo.getUuid());
eventDescription.put("old-state", oldState.name());
@@ -92,11 +91,8 @@ private void pubishOnEventBus(String event, String status, Network vo, State old
eventDescription.put("eventDateTime", eventDate);
eventMsg.setDescription(eventDescription);
- try {
- s_eventBus.publish(eventMsg);
- } catch (EventBusException e) {
- logger.warn("Failed to publish state change event on the event bus.");
- }
+
+ eventDistributor.publish(eventMsg);
}
private String getEntityFromClassName(String entityClassName) {
diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java
index 9308be5fb320..b2ae8b898378 100755
--- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java
+++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java
@@ -126,12 +126,18 @@ public interface ResourceManager extends ResourceService, Configurable {
public List listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType type, long dcId);
+ public List listAllUpHostsInOneZoneByHypervisor(HypervisorType type, long dcId);
+
public List listAllUpAndEnabledHostsInOneZone(long dcId);
public List listAllHostsInOneZoneByType(Host.Type type, long dcId);
public List listAllHostsInAllZonesByType(Type type);
+ public List listAllHostsInOneZoneNotInClusterByHypervisor(final HypervisorType type, long dcId, long clusterId);
+
+ public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, long dcId, long clusterId);
+
public List listAvailHypervisorInZone(Long hostId, Long zoneId);
public HostVO findHostByGuid(String guid);
diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
index daeb4b19a187..c3909bc56b0d 100644
--- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
+++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
@@ -18,6 +18,7 @@
import java.math.BigDecimal;
import java.util.List;
+import java.util.Map;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
@@ -96,14 +97,6 @@ public interface StorageManager extends StorageService {
true,
ConfigKey.Scope.Global,
null);
- ConfigKey ConvertVmwareInstanceToKvmTimeout = new ConfigKey<>(Integer.class,
- "convert.vmware.instance.to.kvm.timeout",
- "Storage",
- "8",
- "Timeout (in hours) for the instance conversion process from VMware through the virt-v2v binary on a KVM host",
- true,
- ConfigKey.Scope.Global,
- null);
ConfigKey KvmAutoConvergence = new ConfigKey<>(Boolean.class,
"kvm.auto.convergence",
"Storage",
@@ -125,7 +118,7 @@ public interface StorageManager extends StorageService {
"storage.pool.disk.wait",
"Storage",
"60",
- "Timeout (in secs) for the storage pool disk (of managed pool) to become available in the host. Currently only supported for PowerFlex.",
+ "Timeout (in secs) for the storage pool disk (of managed pool) to become available in the host. Currently supported for PowerFlex only.",
true,
ConfigKey.Scope.StoragePool,
null);
@@ -134,7 +127,7 @@ public interface StorageManager extends StorageService {
"storage.pool.client.timeout",
"Storage",
"60",
- "Timeout (in secs) for the storage pool client connection timeout (for managed pools). Currently only supported for PowerFlex.",
+ "Timeout (in secs) for the API client connection timeout of storage pool (for managed pools). Currently supported for PowerFlex only.",
false,
ConfigKey.Scope.StoragePool,
null);
@@ -143,11 +136,20 @@ public interface StorageManager extends StorageService {
"storage.pool.client.max.connections",
"Storage",
"100",
- "Maximum connections for the storage pool client (for managed pools). Currently only supported for PowerFlex.",
+ "Maximum connections for the API client of storage pool (for managed pools). Currently supported for PowerFlex only.",
false,
ConfigKey.Scope.StoragePool,
null);
+ ConfigKey STORAGE_POOL_CONNECTED_CLIENTS_LIMIT = new ConfigKey<>(Integer.class,
+ "storage.pool.connected.clients.limit",
+ "Storage",
+ "-1",
+ "Maximum connected storage pool clients supported for the storage (for managed pools), <= 0 for unlimited (default: -1). Currently supported for PowerFlex only.",
+ true,
+ ConfigKey.Scope.StoragePool,
+ null);
+
ConfigKey STORAGE_POOL_IO_POLICY = new ConfigKey<>(String.class,
"kvm.storage.pool.io.policy",
"Storage",
@@ -259,6 +261,10 @@ static Boolean getFullCloneConfiguration(Long storeId) {
boolean canPoolProvideStorageStats(StoragePool pool);
+ boolean poolProvidesCustomStorageStats(StoragePool pool);
+
+ Map getCustomStorageStats(StoragePool pool);
+
/**
* Checks if a host has running VMs that are using its local storage pool.
* @return true if local storage is active on the host
@@ -295,6 +301,8 @@ static Boolean getFullCloneConfiguration(Long storeId) {
boolean canHostAccessStoragePool(Host host, StoragePool pool);
+ boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool);
+
Host getHost(long hostId);
Host updateSecondaryStorage(long secStorageId, String newUrl);
@@ -348,6 +356,10 @@ static Boolean getFullCloneConfiguration(Long storeId) {
boolean registerHostListener(String providerUuid, HypervisorHostListener listener);
+ Pair, Boolean> getStoragePoolNFSMountOpts(StoragePool pool, Map details);
+
+ String getStoragePoolMountFailureReason(String error);
+
boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException;
void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException;
@@ -368,6 +380,8 @@ static Boolean getFullCloneConfiguration(Long storeId) {
Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering);
+ ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes);
+
void cleanupDownloadUrls();
void setDiskProfileThrottling(DiskProfile dskCh, ServiceOffering offering, DiskOffering diskOffering);
diff --git a/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java b/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java
index 2d51c3c08703..a1c54b90328b 100644
--- a/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java
+++ b/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java
@@ -264,11 +264,13 @@ public void setServiceOffering(ServiceOffering offering) {
_offering = offering;
}
+ @Override
public void setCpuOvercommitRatio(Float cpuOvercommitRatio) {
this.cpuOvercommitRatio = cpuOvercommitRatio;
}
+ @Override
public void setMemoryOvercommitRatio(Float memoryOvercommitRatio) {
this.memoryOvercommitRatio = memoryOvercommitRatio;
diff --git a/engine/components-api/src/test/java/com/cloud/network/NetworkStateListenerTest.java b/engine/components-api/src/test/java/com/cloud/network/NetworkStateListenerTest.java
new file mode 100644
index 000000000000..30960210bfb6
--- /dev/null
+++ b/engine/components-api/src/test/java/com/cloud/network/NetworkStateListenerTest.java
@@ -0,0 +1,40 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.network;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.framework.events.EventDistributor;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.InjectMocks;
+import org.mockito.Mockito;
+import org.springframework.test.util.ReflectionTestUtils;
+
+public class NetworkStateListenerTest {
+ @InjectMocks
+ NetworkStateListener networkStateListener = new NetworkStateListener(Mockito.mock(ConfigurationDao.class));
+
+ @Test
+ public void testSetEventDistributor() {
+ EventDistributor eventDistributor = null;
+ networkStateListener.setEventDistributor(eventDistributor);
+ Assert.assertNull(ReflectionTestUtils.getField(networkStateListener, "eventDistributor"));
+ eventDistributor = Mockito.mock(EventDistributor.class);
+ networkStateListener.setEventDistributor(eventDistributor);
+ Assert.assertEquals(eventDistributor, ReflectionTestUtils.getField(networkStateListener, "eventDistributor"));
+ }
+}
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java
index 22c0b3fd71a9..173fd9fc704a 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java
@@ -45,6 +45,8 @@
import com.cloud.agent.api.CheckVirtualMachineCommand;
import com.cloud.agent.api.CleanupNetworkRulesCmd;
import com.cloud.agent.api.Command;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.MaintainCommand;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.ModifySshKeysCommand;
@@ -122,8 +124,9 @@ public int compare(final Object o1, final Object o2) {
StopCommand.class.toString(), CheckVirtualMachineCommand.class.toString(), PingTestCommand.class.toString(), CheckHealthCommand.class.toString(),
ReadyCommand.class.toString(), ShutdownCommand.class.toString(), SetupCommand.class.toString(),
CleanupNetworkRulesCmd.class.toString(), CheckNetworkCommand.class.toString(), PvlanSetupCommand.class.toString(), CheckOnHostCommand.class.toString(),
- ModifyTargetsCommand.class.toString(), ModifySshKeysCommand.class.toString(), ModifyStoragePoolCommand.class.toString(), SetupMSListCommand.class.toString(), RollingMaintenanceCommand.class.toString(),
- CleanupPersistentNetworkResourceCommand.class.toString()};
+ ModifyTargetsCommand.class.toString(), ModifySshKeysCommand.class.toString(),
+ CreateStoragePoolCommand.class.toString(), DeleteStoragePoolCommand.class.toString(), ModifyStoragePoolCommand.class.toString(),
+ SetupMSListCommand.class.toString(), RollingMaintenanceCommand.class.toString(), CleanupPersistentNetworkResourceCommand.class.toString()};
protected final static String[] s_commandsNotAllowedInConnectingMode = new String[] { StartCommand.class.toString(), CreateCommand.class.toString() };
static {
Arrays.sort(s_commandsAllowedInMaintenanceMode);
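s_commandsAllowedInMaintenanceMode is kept as a sorted String[] of class names, presumably so membership can be tested with Arrays.binarySearch rather than a linear scan; that is why new entries such as CreateStoragePoolCommand and DeleteStoragePoolCommand only need to be added to the list and the static sort takes care of the rest. A self-contained sketch of the same pattern, with placeholder class names:

    import java.util.Arrays;

    public class SortedWhitelistSketch {
        // entries may be listed in any order; the static initializer sorts them
        private static final String[] ALLOWED = {
            "com.example.ModifyStoragePoolCommand",
            "com.example.CreateStoragePoolCommand",
            "com.example.DeleteStoragePoolCommand",
        };
        static {
            Arrays.sort(ALLOWED);   // Arrays.binarySearch requires sorted input
        }
        public static boolean allowedInMaintenance(Object cmd) {
            return Arrays.binarySearch(ALLOWED, cmd.getClass().getName()) >= 0;
        }
    }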
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index 824b9f5f45d6..d21e8b0fc7b2 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -52,6 +52,7 @@
import com.cloud.configuration.Resource;
import com.cloud.domain.Domain;
import com.cloud.domain.dao.DomainDao;
+import com.cloud.exception.ResourceAllocationException;
import com.cloud.network.vpc.VpcVO;
import com.cloud.network.vpc.dao.VpcDao;
import com.cloud.user.dao.AccountDao;
@@ -89,6 +90,7 @@
import org.apache.cloudstack.jobs.JobInfo;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.reservation.dao.ReservationDao;
+import org.apache.cloudstack.resource.ResourceCleanupService;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
@@ -401,6 +403,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private VpcDao vpcDao;
@Inject
private DomainDao domainDao;
+ @Inject
+ ResourceCleanupService resourceCleanupService;
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
@@ -690,6 +694,7 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti
if (logger.isDebugEnabled()) {
logger.debug("Expunged " + vm);
}
+ resourceCleanupService.purgeExpungedVmResourcesLaterIfNeeded(vm);
}
private void handleUnsuccessfulExpungeOperation(List<Command> finalizeExpungeCommands, List<Command> nicExpungeCommands,
@@ -1076,6 +1081,10 @@ protected void checkAndAttemptMigrateVmAcrossCluster(final VMInstanceVO vm, fina
return;
}
Host lastHost = _hostDao.findById(vm.getLastHostId());
+ if (lastHost == null) {
+ logger.warn("Could not find last host with id [{}], skipping migrate VM [{}] across cluster check." , vm.getLastHostId(), vm.getUuid());
+ return;
+ }
if (destinationClusterId.equals(lastHost.getClusterId())) {
return;
}
@@ -1221,21 +1230,9 @@ public void orchestrateStart(final String vmUuid, final Map 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
- userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO, cluster_detail_cpu.getValue(), true);
- userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, cluster_detail_ram.getValue(), true);
- } else if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO) != null) {
- userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO, cluster_detail_cpu.getValue(), true);
- userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, cluster_detail_ram.getValue(), true);
- }
+ final Long clusterId = dest.getCluster().getId();
+ updateOverCommitRatioForVmProfile(vmProfile, clusterId);
- vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
- vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
@@ -1250,7 +1247,7 @@ public void orchestrateStart(final String vmUuid, final Map 1f) ||
+ (vmDetailCpu != null && Float.parseFloat(vmDetailCpu.getValue()) != parsedClusterCpuDetailCpu)) {
+ userVmDetailsDao.addDetail(vmProfile.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO, clusterDetailCpu.getValue(), true);
+ }
+ if ((vmDetailRam == null && parsedClusterDetailRam > 1f) ||
+ (vmDetailRam != null && Float.parseFloat(vmDetailRam.getValue()) != parsedClusterDetailRam)) {
+ userVmDetailsDao.addDetail(vmProfile.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, clusterDetailRam.getValue(), true);
+ }
+
+ vmProfile.setCpuOvercommitRatio(Float.parseFloat(clusterDetailCpu.getValue()));
+ vmProfile.setMemoryOvercommitRatio(Float.parseFloat(clusterDetailRam.getValue()));
+ }
+
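The extracted helper keeps the behaviour of the two removed branches: a VM detail is (re)persisted when the VM has no stored ratio yet and the cluster actually overcommits (ratio > 1), or when the stored ratio no longer matches the cluster value, and the cluster ratios are always pushed into the profile. A worked example under assumed cluster settings:

    // cluster details:  cpu = "2.0", ram = "1.5";  VM has no stored details yet
    // vmDetailCpu == null && 2.0f > 1f  -> addDetail(CPU_OVER_COMMIT_RATIO, "2.0")
    // vmDetailRam == null && 1.5f > 1f  -> addDetail(MEMORY_OVER_COMMIT_RATIO, "1.5")
    // profile.setCpuOvercommitRatio(2.0f);  profile.setMemoryOvercommitRatio(1.5f);
    //
    // If the VM later starts on a cluster where cpu = "3.0", the stored "2.0" no longer
    // matches, so the second half of the condition overwrites the detail with "3.0".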
/**
* Setting pod id to null can result in migration of Volumes across pods. This is not desirable for VMs which
* have a volume in Ready state (happens when a VM is shutdown and started again).
@@ -1992,20 +2010,24 @@ private void orchestrateStop(final String vmUuid, final boolean cleanUpEvenIfUna
}
private void updatePersistenceMap(Map<String, Boolean> vlanToPersistenceMap, NetworkVO networkVO) {
+ if (networkVO == null) {
+ return;
+ }
NetworkOfferingVO offeringVO = networkOfferingDao.findById(networkVO.getNetworkOfferingId());
- if (offeringVO != null) {
- Pair<String, Boolean> data = getVMNetworkDetails(networkVO, offeringVO.isPersistent());
- Boolean shouldDeleteNwResource = (MapUtils.isNotEmpty(vlanToPersistenceMap) && data != null) ? vlanToPersistenceMap.get(data.first()) : null;
- if (data != null && (shouldDeleteNwResource == null || shouldDeleteNwResource)) {
- vlanToPersistenceMap.put(data.first(), data.second());
- }
+ if (offeringVO == null) {
+ return;
+ }
+ Pair<String, Boolean> data = getVMNetworkDetails(networkVO, offeringVO.isPersistent());
+ Boolean shouldDeleteNwResource = (MapUtils.isNotEmpty(vlanToPersistenceMap) && data != null) ? vlanToPersistenceMap.get(data.first()) : null;
+ if (data != null && (shouldDeleteNwResource == null || shouldDeleteNwResource)) {
+ vlanToPersistenceMap.put(data.first(), data.second());
}
}
private Map<String, Boolean> getVlanToPersistenceMapForVM(long vmId) {
List<UserVmJoinVO> userVmJoinVOs = userVmJoinDao.searchByIds(vmId);
Map<String, Boolean> vlanToPersistenceMap = new HashMap<>();
- if (userVmJoinVOs != null && !userVmJoinVOs.isEmpty()) {
+ if (CollectionUtils.isNotEmpty(userVmJoinVOs)) {
for (UserVmJoinVO userVmJoinVO : userVmJoinVOs) {
NetworkVO networkVO = _networkDao.findById(userVmJoinVO.getNetworkId());
updatePersistenceMap(vlanToPersistenceMap, networkVO);
@@ -2719,6 +2741,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
_networkMgr.prepareNicForMigration(profile, dest);
volumeMgr.prepareForMigration(profile, dest);
profile.setConfigDriveLabel(VmConfigDriveLabel.value());
+ updateOverCommitRatioForVmProfile(profile, dest.getHost().getClusterId());
final VirtualMachineTO to = toVmTO(profile);
final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to);
@@ -3006,7 +3029,7 @@ protected void createStoragePoolMappingsForVolumes(VirtualMachineProfile profile
executeManagedStorageChecksWhenTargetStoragePoolNotProvided(targetHost, currentPool, volume);
if (ScopeType.HOST.equals(currentPool.getScope()) || isStorageCrossClusterMigration(plan.getClusterId(), currentPool)) {
createVolumeToStoragePoolMappingIfPossible(profile, plan, volumeToPoolObjectMap, volume, currentPool);
- } else if (shouldMapVolume(profile, volume, currentPool)){
+ } else if (shouldMapVolume(profile, currentPool)){
volumeToPoolObjectMap.put(volume, currentPool);
}
}
@@ -3018,11 +3041,10 @@ protected void createStoragePoolMappingsForVolumes(VirtualMachineProfile profile
* Some context: VMware migration workflow requires all volumes to be mapped (even if volume stays on its current pool);
* however, this is not necessary/desirable for the KVM flow.
*/
- protected boolean shouldMapVolume(VirtualMachineProfile profile, Volume volume, StoragePoolVO currentPool) {
+ protected boolean shouldMapVolume(VirtualMachineProfile profile, StoragePoolVO currentPool) {
boolean isManaged = currentPool.isManaged();
boolean isNotKvm = HypervisorType.KVM != profile.getHypervisorType();
- boolean isNotDatadisk = Type.DATADISK != volume.getVolumeType();
- return isNotKvm || isNotDatadisk || isManaged;
+ return isNotKvm || isManaged;
}
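With the data-disk special case removed, the decision depends only on the hypervisor and whether the current pool is managed. Read against the javadoc above, the outcomes of the new return statement are:

    // hypervisor   | currentPool.isManaged() | shouldMapVolume
    // -------------+-------------------------+----------------
    // non-KVM      | either                  | true  (VMware-style flows map every volume)
    // KVM          | true                    | true  (managed storage still needs the mapping)
    // KVM          | false                   | false (volume is left off the migration mapping)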
/**
@@ -4769,6 +4791,18 @@ protected void HandlePowerStateReport(final String subject, final String senderA
}
}
+ private ApiCommandResourceType getApiCommandResourceTypeForVm(VirtualMachine vm) {
+ switch (vm.getType()) {
+ case DomainRouter:
+ return ApiCommandResourceType.DomainRouter;
+ case ConsoleProxy:
+ return ApiCommandResourceType.ConsoleProxy;
+ case SecondaryStorageVm:
+ return ApiCommandResourceType.SystemVm;
+ }
+ return ApiCommandResourceType.VirtualMachine;
+ }
+
private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) {
Host host = _hostDao.findById(vm.getHostId());
Host poweredHost = _hostDao.findById(vm.getPowerHostId());
@@ -4816,7 +4850,7 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) {
+ " -> Running) from out-of-context transition. VM network environment may need to be reset");
ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, vm.getDomainId(),
- EventTypes.EVENT_VM_START, "Out of band VM power on", vm.getId(), ApiCommandResourceType.VirtualMachine.toString());
+ EventTypes.EVENT_VM_START, "Out of band VM power on", vm.getId(), getApiCommandResourceTypeForVm(vm).toString());
logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName());
break;
@@ -4849,7 +4883,7 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) {
case Running:
case Stopped:
ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM,vm.getDomainId(),
- EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), ApiCommandResourceType.VirtualMachine.toString());
+ EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), getApiCommandResourceTypeForVm(vm).toString());
case Migrating:
logger.info("VM {} is at {} and we received a {} report while there is no pending jobs on it"
, vm.getInstanceName(), vm.getState(), vm.getPowerState());
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
index d07fee322765..ea34f62ecd58 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
@@ -259,6 +259,8 @@
import com.googlecode.ipv6.IPv6Address;
import org.jetbrains.annotations.NotNull;
+import static com.cloud.configuration.ConfigurationManager.MESSAGE_DELETE_VLAN_IP_RANGE_EVENT;
+
/**
* NetworkManagerImpl implements NetworkManager.
*/
@@ -763,6 +765,14 @@ public List<? extends Network> setupNetwork(final Account owner, final NetworkOf
continue;
}
+ // Ensure cidr size is equal to 64 for
+ // - networks other than shared networks
+ // - shared networks with SLAAC V6 only
+ if (predefined != null && StringUtils.isNotBlank(predefined.getIp6Cidr()) &&
+ (!GuestType.Shared.equals(offering.getGuestType()) || guru.isSlaacV6Only())) {
+ _networkModel.checkIp6CidrSizeEqualTo64(predefined.getIp6Cidr());
+ }
+
if (network.getId() != -1) {
if (network instanceof NetworkVO) {
networks.add((NetworkVO) network);
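checkIp6CidrSizeEqualTo64 enforces the /64 requirement that createGuestNetwork previously checked inline (see the hunk further down in this file), now skipped for shared networks unless the guru is SLAAC-v6-only. For example, assuming the model method throws InvalidParameterValueException for other prefix lengths:

    // predefined.getIp6Cidr() = "2001:db8:100:10::/64"  -> isolated, or SLAAC-v6-only shared: passes
    // predefined.getIp6Cidr() = "2001:db8::/48"         -> isolated, or SLAAC-v6-only shared: rejected
    // shared network that is not SLAAC-v6-only          -> the check is not applied at all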
@@ -1031,48 +1041,84 @@ public void saveExtraDhcpOptions(final String networkUuid, final Long nicId, fin
}
}
- @DB
- @Override
- public Pair allocateNic(final NicProfile requested, final Network network, final Boolean isDefaultNic, int deviceId, final VirtualMachineProfile vm)
- throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException {
+ private NicVO persistNicAfterRaceCheck(final NicVO nic, final Long networkId, final NicProfile profile, int deviceId) {
+ return Transaction.execute(new TransactionCallback<NicVO>() {
+ @Override
+ public NicVO doInTransaction(TransactionStatus status) {
+ NicVO vo = _nicDao.findByIp4AddressAndNetworkId(profile.getIPv4Address(), networkId);
+ if (vo == null) {
+ applyProfileToNic(nic, profile, deviceId);
+ vo = _nicDao.persist(nic);
+ return vo;
+ } else {
+ return null;
+ }
+ }
+ });
+ }
+ private NicVO checkForRaceAndAllocateNic(final NicProfile requested, final Network network, final Boolean isDefaultNic, int deviceId, final VirtualMachineProfile vm)
+ throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException {
final NetworkVO ntwkVO = _networksDao.findById(network.getId());
logger.debug("Allocating nic for vm {} in network {} with requested profile {}", vm.getVirtualMachine(), network, requested);
final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, ntwkVO.getGuruName());
- if (requested != null && requested.getMode() == null) {
- requested.setMode(network.getMode());
- }
- final NicProfile profile = guru.allocate(network, requested, vm);
- if (profile == null) {
- return null;
- }
+ NicVO vo = null;
+ boolean retryIpAllocation;
+ do {
+ retryIpAllocation = false;
+ final NicProfile profile = guru.allocate(network, requested, vm);
+ if (profile == null) {
+ return null;
+ }
- if (isNicAllocatedForNsxPublicNetworkOnVR(network, profile, vm)) {
- String guruName = "NsxPublicNetworkGuru";
- NetworkGuru nsxGuru = AdapterBase.getAdapterByName(networkGurus, guruName);
- nsxGuru.allocate(network, profile, vm);
- }
+ if (isDefaultNic != null) {
+ profile.setDefaultNic(isDefaultNic);
+ }
- if (isDefaultNic != null) {
- profile.setDefaultNic(isDefaultNic);
- }
+ if (requested != null && requested.getMode() == null) {
+ profile.setMode(requested.getMode());
+ } else {
+ profile.setMode(network.getMode());
+ }
- if (requested != null && requested.getMode() == null) {
- profile.setMode(requested.getMode());
- } else {
- profile.setMode(network.getMode());
- }
+ vo = new NicVO(guru.getName(), vm.getId(), network.getId(), vm.getType());
+
+ DataCenterVO dcVo = _dcDao.findById(network.getDataCenterId());
+ if (dcVo.getNetworkType() == NetworkType.Basic) {
+ configureNicProfileBasedOnRequestedIp(requested, profile, network);
+ }
+
+ if (profile.getIpv4AllocationRaceCheck()) {
+ vo = persistNicAfterRaceCheck(vo, network.getId(), profile, deviceId);
+ } else {
+ applyProfileToNic(vo, profile, deviceId);
+ vo = _nicDao.persist(vo);
+ }
+
+ if (vo == null) {
+ if (requested.getRequestedIPv4() != null) {
+ throw new InsufficientVirtualNetworkCapacityException("Unable to acquire requested Guest IP address " + requested.getRequestedIPv4() + " for network " + network, DataCenter.class, dcVo.getId());
+ } else {
+ requested.setIPv4Address(null);
+ }
+ retryIpAllocation = true;
+ }
+ } while (retryIpAllocation);
+
+ return vo;
+ }
- NicVO vo = new NicVO(guru.getName(), vm.getId(), network.getId(), vm.getType());
+ @DB
+ @Override
+ public Pair allocateNic(final NicProfile requested, final Network network, final Boolean isDefaultNic, int deviceId, final VirtualMachineProfile vm)
+ throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException {
- DataCenterVO dcVo = _dcDao.findById(network.getDataCenterId());
- if (dcVo.getNetworkType() == NetworkType.Basic) {
- configureNicProfileBasedOnRequestedIp(requested, profile, network);
+ if (requested != null && requested.getMode() == null) {
+ requested.setMode(network.getMode());
}
- deviceId = applyProfileToNic(vo, profile, deviceId);
- vo = _nicDao.persist(vo);
+ NicVO vo = checkForRaceAndAllocateNic(requested, network, isDefaultNic, deviceId, vm);
final Integer networkRate = _networkModel.getNetworkRate(network.getId(), vm.getId());
final NicProfile vmNic = new NicProfile(vo, network, vo.getBroadcastUri(), vo.getIsolationUri(), networkRate, _networkModel.isSecurityGroupSupportedInNetwork(network),
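allocateNic now delegates to checkForRaceAndAllocateNic, which (when the profile asks for the race check) persists the NIC only if no other NIC in the network already holds the allocated IPv4 address; losing the race either fails hard, if the caller requested that exact IP, or clears the address and asks the guru for another one. Reduced to its essential shape (helper names and free variables here are illustrative):

    NicVO nic = null;
    boolean retry;
    do {
        retry = false;
        NicProfile profile = guru.allocate(network, requested, vm);   // guru proposes an IP
        if (profile == null) {
            return null;
        }
        nic = persistIfIpStillFree(profile);                          // transactional double-check
        if (nic == null) {                                            // another thread took the IP first
            if (requested.getRequestedIPv4() != null) {
                // the caller insisted on this IP: surface the conflict instead of retrying
                throw new InsufficientVirtualNetworkCapacityException(
                        "Requested guest IP is already in use", DataCenter.class, zoneId);
            }
            requested.setIPv4Address(null);                           // let the guru pick a different IP
            retry = true;
        }
    } while (retry);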
@@ -2721,8 +2767,8 @@ private Network createGuestNetwork(final long networkOfferingId, final String na
}
}
- if (ipv6 && NetUtils.getIp6CidrSize(ip6Cidr) != 64) {
- throw new InvalidParameterValueException("IPv6 subnet should be exactly 64-bits in size");
+ if (ipv6 && !GuestType.Shared.equals(ntwkOff.getGuestType())) {
+ _networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr);
}
//TODO(VXLAN): Support VNI specified
@@ -3062,17 +3108,7 @@ protected void checkL2OfferingServices(NetworkOfferingVO ntwkOff) {
@Override
@DB
public boolean shutdownNetwork(final long networkId, final ReservationContext context, final boolean cleanupElements) {
- NetworkVO network = _networksDao.findById(networkId);
- if (network.getState() == Network.State.Allocated) {
- logger.debug("Network is already shutdown: {}", network);
- return true;
- }
-
- if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) {
- logger.debug("Network is not implemented: {}", network);
- return false;
- }
-
+ NetworkVO network = null;
try {
//do global lock for the network
network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value());
@@ -3324,17 +3360,17 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con
final NetworkVO networkFinal = network;
try {
- Transaction.execute(new TransactionCallbackNoReturn() {
+ final List<VlanVO> deletedVlanRangeToPublish = Transaction.execute(new TransactionCallback<List<VlanVO>>() {
@Override
- public void doInTransactionWithoutResult(final TransactionStatus status) {
+ public List<VlanVO> doInTransaction(TransactionStatus status) {
final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName());
if (!guru.trash(networkFinal, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId()))) {
throw new CloudRuntimeException("Failed to trash network.");
}
-
- if (!deleteVlansInNetwork(networkFinal, context.getCaller().getId(), callerAccount)) {
- logger.warn("Failed to delete network {}; was unable to cleanup corresponding ip ranges", networkFinal);
+ Pair<Boolean, List<VlanVO>> deletedVlans = deleteVlansInNetwork(networkFinal, context.getCaller().getId(), callerAccount);
+ if (!deletedVlans.first()) {
+ logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges");
throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges");
} else {
// commit transaction only when ips and vlans for the network are released successfully
@@ -3367,8 +3403,10 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
_resourceLimitMgr.decrementResourceCount(networkFinal.getAccountId(), ResourceType.network, networkFinal.getDisplayNetwork());
}
}
+ return deletedVlans.second();
}
});
+ publishDeletedVlanRanges(deletedVlanRangeToPublish);
if (_networksDao.findById(network.getId()) == null) {
// remove its related ACL permission
final Pair<Class<?>, Long> networkMsg = new Pair<Class<?>, Long>(Network.class, networkFinal.getId());
@@ -3386,6 +3424,14 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
return success;
}
+ private void publishDeletedVlanRanges(List<VlanVO> deletedVlanRangeToPublish) {
+ if (CollectionUtils.isNotEmpty(deletedVlanRangeToPublish)) {
+ for (VlanVO vlan : deletedVlanRangeToPublish) {
+ _messageBus.publish(_name, MESSAGE_DELETE_VLAN_IP_RANGE_EVENT, PublishScope.LOCAL, vlan);
+ }
+ }
+ }
+
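deleteVlansInNetwork now reports which VLAN ranges it actually removed, and destroyNetwork publishes MESSAGE_DELETE_VLAN_IP_RANGE_EVENT for them only after the transaction has returned, so subscribers never see a delete event for a range whose removal is later rolled back. The essential shape, with the surrounding details elided:

    // inside the transaction: delete and collect, but do not publish yet
    final List<VlanVO> deleted = Transaction.execute((TransactionCallback<List<VlanVO>>) status -> {
        Pair<Boolean, List<VlanVO>> result = deleteVlansInNetwork(network, callerId, callerAccount);
        if (!result.first()) {
            throw new CloudRuntimeException("IP range cleanup failed");  // rollback: nothing gets published
        }
        return result.second();
    });
    // after the transaction has committed: notify listeners
    publishDeletedVlanRanges(deleted);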
@Override
public boolean resourceCountNeedsUpdate(final NetworkOffering ntwkOff, final ACLType aclType) {
//Update resource count only for Isolated account specific non-system networks
@@ -3393,15 +3439,19 @@ public boolean resourceCountNeedsUpdate(final NetworkOffering ntwkOff, final ACL
return updateResourceCount;
}
- protected boolean deleteVlansInNetwork(final NetworkVO network, final long userId, final Account callerAccount) {
+ protected Pair<Boolean, List<VlanVO>> deleteVlansInNetwork(final NetworkVO network, final long userId, final Account callerAccount) {
final long networkId = network.getId();
//cleanup Public vlans
final List<VlanVO> publicVlans = _vlanDao.listVlansByNetworkId(networkId);
+ List<VlanVO> deletedPublicVlanRange = new ArrayList<>();
boolean result = true;
for (final VlanVO vlan : publicVlans) {
- if (!_configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount)) {
- logger.warn("Failed to delete vlan {});", vlan.getId());
+ VlanVO vlanRange = _configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount);
+ if (vlanRange == null) {
+ logger.warn("Failed to delete vlan " + vlan.getId() + ");");
result = false;
+ } else {
+ deletedPublicVlanRange.add(vlanRange);
}
}
@@ -3421,7 +3471,7 @@ protected boolean deleteVlansInNetwork(final NetworkVO network, final long userI
_dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(),
network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId());
}
- return result;
+ return new Pair<>(result, deletedPublicVlanRange);
}
public class NetworkGarbageCollector extends ManagedContextRunnable {
@@ -4599,10 +4649,16 @@ public Pair<NicProfile, Integer> importNic(final String macAddress, int deviceId
final NicVO vo = Transaction.execute(new TransactionCallback<NicVO>() {
@Override
public NicVO doInTransaction(TransactionStatus status) {
- NicVO existingNic = _nicDao.findByNetworkIdAndMacAddress(network.getId(), macAddress);
- String macAddressToPersist = macAddress;
+ if (StringUtils.isBlank(macAddress)) {
+ throw new CloudRuntimeException("Mac address not specified");
+ }
+ String macAddressToPersist = macAddress.trim();
+ if (!NetUtils.isValidMac(macAddressToPersist)) {
+ throw new CloudRuntimeException("Invalid mac address: " + macAddressToPersist);
+ }
+ NicVO existingNic = _nicDao.findByNetworkIdAndMacAddress(network.getId(), macAddressToPersist);
if (existingNic != null) {
- macAddressToPersist = generateNewMacAddressIfForced(network, macAddress, forced);
+ macAddressToPersist = generateNewMacAddressIfForced(network, macAddressToPersist, forced);
}
NicVO vo = new NicVO(network.getGuruName(), vm.getId(), network.getId(), vm.getType());
vo.setMacAddress(macAddressToPersist);
@@ -4647,7 +4703,7 @@ public NicVO doInTransaction(TransactionStatus status) {
final NicProfile vmNic = new NicProfile(vo, network, vo.getBroadcastUri(), vo.getIsolationUri(), networkRate, _networkModel.isSecurityGroupSupportedInNetwork(network),
_networkModel.getNetworkTag(vm.getHypervisorType(), network));
- return new Pair<NicProfile, Integer>(vmNic, Integer.valueOf(deviceId));
+ return new Pair<>(vmNic, Integer.valueOf(deviceId));
}
protected String getSelectedIpForNicImport(Network network, DataCenter dataCenter, Network.IpAddresses ipAddresses) {
@@ -4691,7 +4747,7 @@ protected Pair<String, String> getNetworkGatewayAndNetmaskForNicImport(Network n
private String generateNewMacAddressIfForced(Network network, String macAddress, boolean forced) {
if (!forced) {
- throw new CloudRuntimeException("NIC with MAC address = " + macAddress + " exists on network with ID = " + network.getId() +
+ throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network with ID " + network.getUuid() +
" and forced flag is disabled");
}
try {
@@ -4722,6 +4778,19 @@ public void unmanageNics(VirtualMachineProfile vm) {
}
}
+ @Override
+ public void expungeLbVmRefs(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(networkElements) || CollectionUtils.isEmpty(vmIds)) {
+ return;
+ }
+ for (NetworkElement element : networkElements) {
+ if (element instanceof LoadBalancingServiceProvider) {
+ LoadBalancingServiceProvider lbProvider = (LoadBalancingServiceProvider)element;
+ lbProvider.expungeLbVmRefs(vmIds, batchSize);
+ }
+ }
+ }
+
@Override
public String getConfigComponentName() {
return NetworkOrchestrationService.class.getSimpleName();
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index ff9da6bccc2a..36e281459492 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -38,6 +38,11 @@
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.user.AccountManager;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
@@ -180,6 +185,8 @@ public enum UserVmCloneType {
}
+ @Inject
+ private AccountManager _accountMgr;
@Inject
EntityManager _entityMgr;
@Inject
@@ -195,6 +202,8 @@ public enum UserVmCloneType {
@Inject
protected VolumeDao _volumeDao;
@Inject
+ protected VMTemplateDao _templateDao;
+ @Inject
protected SnapshotDao _snapshotDao;
@Inject
protected SnapshotDataStoreDao _snapshotDataStoreDao;
@@ -1176,8 +1185,9 @@ public VolumeVO doInTransaction(TransactionStatus status) {
logger.error("Unable to destroy existing volume [{}] due to [{}].", volumeToString, e.getMessage());
}
// In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk
- if (vm.getHypervisorType() == HypervisorType.VMware) {
- logger.info("Trying to expunge volume [{}] from primary data storage.", volumeToString);
+ // For system VMs we do not need a volume entry in the Destroy state
+ if (vm.getHypervisorType() == HypervisorType.VMware || vm.getType().isUsedBySystem()) {
+ logger.info(String.format("Trying to expunge volume [%s] from primary data storage.", volumeToString));
AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId()));
try {
future.get();
@@ -1483,18 +1493,17 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
for (VolumeVO vol : vols) {
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
- DataTO volTO = volumeInfo.getTO();
- DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
- disk.setDetails(getDetails(volumeInfo, dataStore));
-
PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
// This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only)
if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
}
-
+ // make sure this is done AFTER grantAccess, as grantAccess may change the volume's state
+ DataTO volTO = volumeInfo.getTO();
+ DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
+ disk.setDetails(getDetails(volumeInfo, dataStore));
vm.addDisk(disk);
}
@@ -1677,7 +1686,7 @@ protected void checkAndUpdateVolumeAccountResourceCount(VolumeVO originalEntry,
}
}
- private Pair<VolumeVO, DataStore> recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, StorageAccessException {
+ private Pair<VolumeVO, DataStore> recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, StorageAccessException, ResourceAllocationException {
String volToString = getReflectOnlySelectedFields(vol);
VolumeVO newVol;
@@ -1710,6 +1719,7 @@ private Pair<VolumeVO, DataStore> recreateVolume(VolumeVO vol, VirtualMachinePro
}
logger.debug("Created new volume [{}] from old volume [{}].", newVolToString, volToString);
}
+ updateVolumeSize(destPool, newVol);
VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
Long templateId = newVol.getTemplateId();
for (int i = 0; i < 2; i++) {
@@ -1841,8 +1851,39 @@ protected void grantVolumeAccessToHostIfNeeded(PrimaryDataStore volumeStore, lon
}
}
+ /**
+ * Checks whether the size the volume will occupy on the data store differs from its current size.
+ * If it does, verifies the account's resource limits and updates the volume's size accordingly.
+ */
+ protected void updateVolumeSize(DataStore store, VolumeVO vol) throws ResourceAllocationException {
+ if (store == null || !(store.getDriver() instanceof PrimaryDataStoreDriver)) {
+ return;
+ }
+
+ VMTemplateVO template = vol.getTemplateId() != null ? _templateDao.findById(vol.getTemplateId()) : null;
+ PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver) store.getDriver();
+ long newSize = driver.getVolumeSizeRequiredOnPool(vol.getSize(),
+ template == null ? null : template.getSize(),
+ vol.getPassphraseId() != null);
+
+ if (newSize != vol.getSize()) {
+ DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
+ if (newSize > vol.getSize()) {
+ _resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
+ vol.isDisplay(), newSize - vol.getSize(), diskOffering);
+ _resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
+ newSize - vol.getSize(), diskOffering);
+ } else {
+ _resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
+ vol.getSize() - newSize, diskOffering);
+ }
+ vol.setSize(newSize);
+ _volsDao.persist(vol);
+ }
+ }
+
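The driver may report that the volume needs more (or less) space on this particular pool than its logical size, for example to reserve room for an encryption header when a passphrase is set, and only the delta is charged against or released from the account's primary storage limit. A worked example with assumed numbers:

    // vol.getSize() = 10 GiB; driver.getVolumeSizeRequiredOnPool(10 GiB, templateSize, true) = 12 GiB
    //
    // newSize (12 GiB) > vol.getSize() (10 GiB):
    //   checkPrimaryStorageResourceLimit(account, display, 2 GiB, diskOffering)   // may throw ResourceAllocationException
    //   incrementVolumePrimaryStorageResourceCount(account, display, 2 GiB, diskOffering)
    //   vol.setSize(12 GiB); _volsDao.persist(vol);
    //
    // Had the driver returned 8 GiB instead, only the decrement path would run; no limit
    // check is needed when the required size shrinks.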
@Override
- public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException {
+ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException, ResourceAllocationException {
if (dest == null) {
String msg = String.format("Unable to prepare volumes for the VM [%s] because DeployDestination is null.", vm.getVirtualMachine());
logger.error(msg);
@@ -1865,7 +1906,7 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
String volToString = getReflectOnlySelectedFields(vol);
- store = (PrimaryDataStore)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
+ store = (PrimaryDataStore) dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
// For zone-wide managed storage, it is possible that the VM can be started in another
// cluster. In that case, make sure that the volume is in the right access group.
@@ -1876,6 +1917,8 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
long lastClusterId = lastHost == null || lastHost.getClusterId() == null ? -1 : lastHost.getClusterId();
long clusterId = host == null || host.getClusterId() == null ? -1 : host.getClusterId();
+ updateVolumeSize(store, (VolumeVO) vol);
+
if (lastClusterId != clusterId) {
if (lastHost != null) {
storageMgr.removeStoragePoolFromCluster(lastHost.getId(), vol.get_iScsiName(), store);
@@ -1895,6 +1938,7 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
}
} else if (task.type == VolumeTaskType.MIGRATE) {
store = (PrimaryDataStore) dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
+ updateVolumeSize(store, task.volume);
vol = migrateVolume(task.volume, store);
} else if (task.type == VolumeTaskType.RECREATE) {
Pair<VolumeVO, DataStore> result = recreateVolume(task.volume, vm, dest);
diff --git a/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java b/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java
index d1532cdbef14..58746a9a6cf2 100644
--- a/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java
+++ b/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java
@@ -16,6 +16,8 @@
// under the License.
package org.apache.cloudstack.engine.orchestration;
+import static org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService.NetworkLockTimeout;
+import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
@@ -30,6 +32,7 @@
import java.util.Map;
import com.cloud.dc.DataCenter;
+import com.cloud.exception.InsufficientVirtualNetworkCapacityException;
import com.cloud.network.IpAddressManager;
import com.cloud.utils.Pair;
import org.junit.Assert;
@@ -38,6 +41,7 @@
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.ArgumentMatchers;
+import org.mockito.MockedStatic;
import org.mockito.Mockito;
import com.cloud.api.query.dao.DomainRouterJoinDao;
@@ -69,6 +73,9 @@
import com.cloud.network.vpc.VpcManager;
import com.cloud.network.vpc.VpcVO;
import com.cloud.offerings.NetworkOfferingVO;
+import com.cloud.utils.db.EntityManager;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.Ip;
import com.cloud.vm.DomainRouterVO;
@@ -93,7 +100,7 @@
@RunWith(JUnit4.class)
public class NetworkOrchestratorTest extends TestCase {
- NetworkOrchestrator testOrchastrator = Mockito.spy(new NetworkOrchestrator());
+ NetworkOrchestrator testOrchestrator = Mockito.spy(new NetworkOrchestrator());
private String guruName = "GuestNetworkGuru";
private String dhcpProvider = "VirtualRouter";
@@ -112,21 +119,22 @@ public class NetworkOrchestratorTest extends TestCase {
@Before
public void setUp() {
// make class-scope mocks
- testOrchastrator._nicDao = mock(NicDao.class);
- testOrchastrator._networksDao = mock(NetworkDao.class);
- testOrchastrator._networkModel = mock(NetworkModel.class);
- testOrchastrator._nicSecondaryIpDao = mock(NicSecondaryIpDao.class);
- testOrchastrator._ntwkSrvcDao = mock(NetworkServiceMapDao.class);
- testOrchastrator._nicIpAliasDao = mock(NicIpAliasDao.class);
- testOrchastrator._ipAddressDao = mock(IPAddressDao.class);
- testOrchastrator._vlanDao = mock(VlanDao.class);
- testOrchastrator._networkModel = mock(NetworkModel.class);
- testOrchastrator._nicExtraDhcpOptionDao = mock(NicExtraDhcpOptionDao.class);
- testOrchastrator.routerDao = mock(DomainRouterDao.class);
- testOrchastrator.routerNetworkDao = mock(RouterNetworkDao.class);
- testOrchastrator._vpcMgr = mock(VpcManager.class);
- testOrchastrator.routerJoinDao = mock(DomainRouterJoinDao.class);
- testOrchastrator._ipAddrMgr = mock(IpAddressManager.class);
+ testOrchestrator._nicDao = mock(NicDao.class);
+ testOrchestrator._networksDao = mock(NetworkDao.class);
+ testOrchestrator._networkModel = mock(NetworkModel.class);
+ testOrchestrator._nicSecondaryIpDao = mock(NicSecondaryIpDao.class);
+ testOrchestrator._ntwkSrvcDao = mock(NetworkServiceMapDao.class);
+ testOrchestrator._nicIpAliasDao = mock(NicIpAliasDao.class);
+ testOrchestrator._ipAddressDao = mock(IPAddressDao.class);
+ testOrchestrator._vlanDao = mock(VlanDao.class);
+ testOrchestrator._networkModel = mock(NetworkModel.class);
+ testOrchestrator._nicExtraDhcpOptionDao = mock(NicExtraDhcpOptionDao.class);
+ testOrchestrator.routerDao = mock(DomainRouterDao.class);
+ testOrchestrator.routerNetworkDao = mock(RouterNetworkDao.class);
+ testOrchestrator._vpcMgr = mock(VpcManager.class);
+ testOrchestrator.routerJoinDao = mock(DomainRouterJoinDao.class);
+ testOrchestrator._ipAddrMgr = mock(IpAddressManager.class);
+ testOrchestrator._entityMgr = mock(EntityManager.class);
DhcpServiceProvider provider = mock(DhcpServiceProvider.class);
Map<Network.Capability, String> capabilities = new HashMap<Network.Capability, String>();
@@ -135,13 +143,13 @@ public void setUp() {
when(provider.getCapabilities()).thenReturn(services);
capabilities.put(Network.Capability.DhcpAccrossMultipleSubnets, "true");
- when(testOrchastrator._ntwkSrvcDao.getProviderForServiceInNetwork(ArgumentMatchers.anyLong(), ArgumentMatchers.eq(Service.Dhcp))).thenReturn(dhcpProvider);
- when(testOrchastrator._networkModel.getElementImplementingProvider(dhcpProvider)).thenReturn(provider);
+ when(testOrchestrator._ntwkSrvcDao.getProviderForServiceInNetwork(ArgumentMatchers.anyLong(), ArgumentMatchers.eq(Service.Dhcp))).thenReturn(dhcpProvider);
+ when(testOrchestrator._networkModel.getElementImplementingProvider(dhcpProvider)).thenReturn(provider);
when(guru.getName()).thenReturn(guruName);
List<NetworkGuru> networkGurus = new ArrayList<NetworkGuru>();
networkGurus.add(guru);
- testOrchastrator.networkGurus = networkGurus;
+ testOrchestrator.networkGurus = networkGurus;
when(networkOffering.getGuestType()).thenReturn(GuestType.L2);
when(networkOffering.getId()).thenReturn(networkOfferingId);
@@ -156,21 +164,21 @@ public void testRemoveDhcpServiceWithNic() {
// make sure that release dhcp will be called
when(vm.getType()).thenReturn(Type.User);
- when(testOrchastrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(true);
+ when(testOrchestrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(true);
when(network.getTrafficType()).thenReturn(TrafficType.Guest);
when(network.getGuestType()).thenReturn(GuestType.Shared);
- when(testOrchastrator._nicDao.listByNetworkIdTypeAndGatewayAndBroadcastUri(nic.getNetworkId(), VirtualMachine.Type.User, nic.getIPv4Gateway(), nic.getBroadcastUri()))
+ when(testOrchestrator._nicDao.listByNetworkIdTypeAndGatewayAndBroadcastUri(nic.getNetworkId(), VirtualMachine.Type.User, nic.getIPv4Gateway(), nic.getBroadcastUri()))
.thenReturn(new ArrayList());
when(network.getGuruName()).thenReturn(guruName);
- when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
+ when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
- testOrchastrator.removeNic(vm, nic);
+ testOrchestrator.removeNic(vm, nic);
verify(nic, times(1)).setState(Nic.State.Deallocating);
- verify(testOrchastrator._networkModel, times(2)).getElementImplementingProvider(dhcpProvider);
- verify(testOrchastrator._ntwkSrvcDao, times(2)).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
- verify(testOrchastrator._networksDao, times(2)).findById(nic.getNetworkId());
+ verify(testOrchestrator._networkModel, times(2)).getElementImplementingProvider(dhcpProvider);
+ verify(testOrchestrator._ntwkSrvcDao, times(2)).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
+ verify(testOrchestrator._networksDao, times(2)).findById(nic.getNetworkId());
}
@Test
public void testDontRemoveDhcpServiceFromDomainRouter() {
@@ -183,14 +191,14 @@ public void testDontRemoveDhcpServiceFromDomainRouter() {
when(vm.getType()).thenReturn(Type.DomainRouter);
when(network.getGuruName()).thenReturn(guruName);
- when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
+ when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
- testOrchastrator.removeNic(vm, nic);
+ testOrchestrator.removeNic(vm, nic);
verify(nic, times(1)).setState(Nic.State.Deallocating);
- verify(testOrchastrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
- verify(testOrchastrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
- verify(testOrchastrator._networksDao, times(1)).findById(nic.getNetworkId());
+ verify(testOrchestrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
+ verify(testOrchestrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
+ verify(testOrchestrator._networksDao, times(1)).findById(nic.getNetworkId());
}
@Test
public void testDontRemoveDhcpServiceWhenNotProvided() {
@@ -201,45 +209,45 @@ public void testDontRemoveDhcpServiceWhenNotProvided() {
// make sure that release dhcp will *not* be called
when(vm.getType()).thenReturn(Type.User);
- when(testOrchastrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(false);
+ when(testOrchestrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(false);
when(network.getGuruName()).thenReturn(guruName);
- when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
+ when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
- testOrchastrator.removeNic(vm, nic);
+ testOrchestrator.removeNic(vm, nic);
verify(nic, times(1)).setState(Nic.State.Deallocating);
- verify(testOrchastrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
- verify(testOrchastrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
- verify(testOrchastrator._networksDao, times(1)).findById(nic.getNetworkId());
+ verify(testOrchestrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
+ verify(testOrchestrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
+ verify(testOrchestrator._networksDao, times(1)).findById(nic.getNetworkId());
}
@Test
public void testCheckL2OfferingServicesEmptyServices() {
- when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(new ArrayList<>());
- when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
- testOrchastrator.checkL2OfferingServices(networkOffering);
+ when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(new ArrayList<>());
+ when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
+ testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test
public void testCheckL2OfferingServicesUserDataOnly() {
- when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData));
- when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
- testOrchastrator.checkL2OfferingServices(networkOffering);
+ when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData));
+ when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
+ testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckL2OfferingServicesMultipleServicesIncludingUserData() {
- when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData, Service.Dhcp));
- when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
- testOrchastrator.checkL2OfferingServices(networkOffering);
+ when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData, Service.Dhcp));
+ when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
+ testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckL2OfferingServicesMultipleServicesNotIncludingUserData() {
- when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.Dns, Service.Dhcp));
- when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
- testOrchastrator.checkL2OfferingServices(networkOffering);
+ when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.Dns, Service.Dhcp));
+ when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
+ testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test
@@ -251,7 +259,7 @@ public void testConfigureNicProfileBasedOnRequestedIpTestMacNull() {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, null, "192.168.100.150");
- testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
+ testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert("192.168.100.150", "192.168.100.1", "255.255.255.0", nicProfile, 1, 1);
}
@@ -265,7 +273,7 @@ public void testConfigureNicProfileBasedOnRequestedIpTestNicProfileMacNotNull()
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
- testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
+ testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert("192.168.100.150", "192.168.100.1", "255.255.255.0", nicProfile, 1, 0);
}
@@ -292,7 +300,7 @@ private void testConfigureNicProfileBasedOnRequestedIpTestRequestedIp(String req
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, null, requestedIpv4Address);
- testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
+ testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 0, 0);
}
@@ -319,7 +327,7 @@ private void testConfigureNicProfileBasedOnRequestedIpTestGateway(String ipv4Gat
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, ipv4Gateway, "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
- testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
+ testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 1, 0);
}
@@ -345,7 +353,7 @@ private void testConfigureNicProfileBasedOnRequestedIpTestNetmask(String ipv4Net
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", ipv4Netmask, "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
- testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
+ testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 1, 0);
}
@@ -357,9 +365,9 @@ public void testConfigureNicProfileBasedOnRequestedIpTestIPAddressVONull() {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
- when(testOrchastrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
+ when(testOrchestrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
- testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
+ testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 0, 0);
}
@@ -375,21 +383,21 @@ private void configureTestConfigureNicProfileBasedOnRequestedIpTests(NicProfile
when(ipVoSpy.getState()).thenReturn(state);
if (ipVoIsNull) {
- when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
+ when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
} else {
- when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
+ when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
}
VlanVO vlanSpy = Mockito.spy(new VlanVO(Vlan.VlanType.DirectAttached, "vlanTag", vlanGateway, vlanNetmask, 0l, "192.168.100.100 - 192.168.100.200", 0l, new Long(0l),
"ip6Gateway", "ip6Cidr", "ip6Range"));
Mockito.doReturn(0l).when(vlanSpy).getId();
- when(testOrchastrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(vlanSpy);
- when(testOrchastrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
- when(testOrchastrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
- when(testOrchastrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
+ when(testOrchestrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(vlanSpy);
+ when(testOrchestrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
+ when(testOrchestrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
+ when(testOrchestrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
try {
- when(testOrchastrator._networkModel.getNextAvailableMacAddressInNetwork(Mockito.anyLong())).thenReturn(macAddress);
+ when(testOrchestrator._networkModel.getNextAvailableMacAddressInNetwork(Mockito.anyLong())).thenReturn(macAddress);
} catch (InsufficientAddressCapacityException e) {
e.printStackTrace();
}
@@ -397,9 +405,9 @@ private void configureTestConfigureNicProfileBasedOnRequestedIpTests(NicProfile
private void verifyAndAssert(String requestedIpv4Address, String ipv4Gateway, String ipv4Netmask, NicProfile nicProfile, int acquireLockAndCheckIfIpv4IsFreeTimes,
int nextMacAddressTimes) {
- verify(testOrchastrator, times(acquireLockAndCheckIfIpv4IsFreeTimes)).acquireLockAndCheckIfIpv4IsFree(Mockito.any(Network.class), Mockito.anyString());
+ verify(testOrchestrator, times(acquireLockAndCheckIfIpv4IsFreeTimes)).acquireLockAndCheckIfIpv4IsFree(Mockito.any(Network.class), Mockito.anyString());
try {
- verify(testOrchastrator._networkModel, times(nextMacAddressTimes)).getNextAvailableMacAddressInNetwork(Mockito.anyLong());
+ verify(testOrchestrator._networkModel, times(nextMacAddressTimes)).getNextAvailableMacAddressInNetwork(Mockito.anyLong());
} catch (InsufficientAddressCapacityException e) {
e.printStackTrace();
}
@@ -441,27 +449,27 @@ private void executeTestAcquireLockAndCheckIfIpv4IsFree(IPAddressVO.State state,
ipVoSpy.setState(state);
ipVoSpy.setState(state);
if (isIPAddressVONull) {
- when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
+ when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
} else {
- when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
+ when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
}
- when(testOrchastrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
- when(testOrchastrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
- when(testOrchastrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
+ when(testOrchestrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
+ when(testOrchestrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
+ when(testOrchestrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
- testOrchastrator.acquireLockAndCheckIfIpv4IsFree(network, "192.168.100.150");
+ testOrchestrator.acquireLockAndCheckIfIpv4IsFree(network, "192.168.100.150");
- verify(testOrchastrator._ipAddressDao, Mockito.times(findByIpTimes)).findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString());
- verify(testOrchastrator._ipAddressDao, Mockito.times(acquireLockTimes)).acquireInLockTable(Mockito.anyLong());
- verify(testOrchastrator._ipAddressDao, Mockito.times(releaseFromLockTimes)).releaseFromLockTable(Mockito.anyLong());
- verify(testOrchastrator._ipAddressDao, Mockito.times(updateTimes)).update(Mockito.anyLong(), Mockito.any(IPAddressVO.class));
- verify(testOrchastrator, Mockito.times(validateTimes)).validateLockedRequestedIp(Mockito.any(IPAddressVO.class), Mockito.any(IPAddressVO.class));
+ verify(testOrchestrator._ipAddressDao, Mockito.times(findByIpTimes)).findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString());
+ verify(testOrchestrator._ipAddressDao, Mockito.times(acquireLockTimes)).acquireInLockTable(Mockito.anyLong());
+ verify(testOrchestrator._ipAddressDao, Mockito.times(releaseFromLockTimes)).releaseFromLockTable(Mockito.anyLong());
+ verify(testOrchestrator._ipAddressDao, Mockito.times(updateTimes)).update(Mockito.anyLong(), Mockito.any(IPAddressVO.class));
+ verify(testOrchestrator, Mockito.times(validateTimes)).validateLockedRequestedIp(Mockito.any(IPAddressVO.class), Mockito.any(IPAddressVO.class));
}
@Test(expected = InvalidParameterValueException.class)
public void validateLockedRequestedIpTestNullLockedIp() {
IPAddressVO ipVoSpy = Mockito.spy(new IPAddressVO(new Ip("192.168.100.100"), 0l, 0l, 0l, true));
- testOrchastrator.validateLockedRequestedIp(ipVoSpy, null);
+ testOrchestrator.validateLockedRequestedIp(ipVoSpy, null);
}
@Test
@@ -476,7 +484,7 @@ public void validateLockedRequestedIpTestNotFreeLockedIp() {
IPAddressVO lockedIp = ipVoSpy;
lockedIp.setState(states[i]);
try {
- testOrchastrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
+ testOrchestrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
} catch (InvalidParameterValueException e) {
expectedException = true;
}
@@ -489,7 +497,7 @@ public void validateLockedRequestedIpTestFreeAndNotNullIp() {
IPAddressVO ipVoSpy = Mockito.spy(new IPAddressVO(new Ip("192.168.100.100"), 0l, 0l, 0l, true));
IPAddressVO lockedIp = ipVoSpy;
lockedIp.setState(State.Free);
- testOrchastrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
+ testOrchestrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
}
@Test
@@ -500,16 +508,16 @@ public void testDontReleaseNicWhenPreserveNicsSettingEnabled() {
when(vm.getType()).thenReturn(Type.User);
when(network.getGuruName()).thenReturn(guruName);
- when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
+ when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
Long nicId = 1L;
when(nic.getId()).thenReturn(nicId);
when(vm.getParameter(VirtualMachineProfile.Param.PreserveNics)).thenReturn(true);
- testOrchastrator.removeNic(vm, nic);
+ testOrchestrator.removeNic(vm, nic);
verify(nic, never()).setState(Nic.State.Deallocating);
- verify(testOrchastrator._nicDao, never()).remove(nicId);
+ verify(testOrchestrator._nicDao, never()).remove(nicId);
}
public void encodeVlanIdIntoBroadcastUriTestVxlan() {
@@ -568,7 +576,7 @@ public void encodeVlanIdIntoBroadcastUriTestNullVxlanIdWithSchemaIsolationVlan()
@Test(expected = InvalidParameterValueException.class)
public void encodeVlanIdIntoBroadcastUriTestNullNetwork() {
- URI resultUri = testOrchastrator.encodeVlanIdIntoBroadcastUri("vxlan://123", null);
+ URI resultUri = testOrchestrator.encodeVlanIdIntoBroadcastUri("vxlan://123", null);
}
private void encodeVlanIdIntoBroadcastUriPrepareAndTest(String vlanId, String isolationMethod, String expectedIsolation, String expectedUri) {
@@ -577,7 +585,7 @@ private void encodeVlanIdIntoBroadcastUriPrepareAndTest(String vlanId, String is
isolationMethods.add(isolationMethod);
physicalNetwork.setIsolationMethods(isolationMethods);
- URI resultUri = testOrchastrator.encodeVlanIdIntoBroadcastUri(vlanId, physicalNetwork);
+ URI resultUri = testOrchestrator.encodeVlanIdIntoBroadcastUri(vlanId, physicalNetwork);
Assert.assertEquals(expectedIsolation, resultUri.getScheme());
Assert.assertEquals(expectedUri, resultUri.toString());
@@ -595,17 +603,17 @@ private NicProfile prepareMocksAndRunPrepareNic(VirtualMachine.Type vmType, bool
Mockito.when(network.getDns2()).thenReturn(ip4Dns[1]);
Mockito.when(network.getIp6Dns1()).thenReturn(ip6Dns[0]);
Mockito.when(network.getIp6Dns2()).thenReturn(ip6Dns[1]);
- Mockito.when(testOrchastrator._networkModel.getNetworkRate(networkId, vmId)).thenReturn(networkRate);
+ Mockito.when(testOrchestrator._networkModel.getNetworkRate(networkId, vmId)).thenReturn(networkRate);
NicVO nicVO = Mockito.mock(NicVO.class);
Mockito.when(nicVO.isDefaultNic()).thenReturn(isDefaultNic);
- Mockito.when(testOrchastrator._nicDao.findById(nicId)).thenReturn(nicVO);
- Mockito.when(testOrchastrator._nicDao.update(nicId, nicVO)).thenReturn(true);
- Mockito.when(testOrchastrator._networkModel.isSecurityGroupSupportedInNetwork(network)).thenReturn(false);
- Mockito.when(testOrchastrator._networkModel.getNetworkTag(hypervisorType, network)).thenReturn(null);
- Mockito.when(testOrchastrator._ntwkSrvcDao.getDistinctProviders(networkId)).thenReturn(new ArrayList<>());
- testOrchastrator.networkElements = new ArrayList<>();
- Mockito.when(testOrchastrator._nicExtraDhcpOptionDao.listByNicId(nicId)).thenReturn(new ArrayList<>());
- Mockito.when(testOrchastrator._ntwkSrvcDao.areServicesSupportedInNetwork(networkId, Service.Dhcp)).thenReturn(false);
+ Mockito.when(testOrchestrator._nicDao.findById(nicId)).thenReturn(nicVO);
+ Mockito.when(testOrchestrator._nicDao.update(nicId, nicVO)).thenReturn(true);
+ Mockito.when(testOrchestrator._networkModel.isSecurityGroupSupportedInNetwork(network)).thenReturn(false);
+ Mockito.when(testOrchestrator._networkModel.getNetworkTag(hypervisorType, network)).thenReturn(null);
+ Mockito.when(testOrchestrator._ntwkSrvcDao.getDistinctProviders(networkId)).thenReturn(new ArrayList<>());
+ testOrchestrator.networkElements = new ArrayList<>();
+ Mockito.when(testOrchestrator._nicExtraDhcpOptionDao.listByNicId(nicId)).thenReturn(new ArrayList<>());
+ Mockito.when(testOrchestrator._ntwkSrvcDao.areServicesSupportedInNetwork(networkId, Service.Dhcp)).thenReturn(false);
VirtualMachineProfile virtualMachineProfile = Mockito.mock(VirtualMachineProfile.class);
Mockito.when(virtualMachineProfile.getType()).thenReturn(vmType);
Mockito.when(virtualMachineProfile.getId()).thenReturn(vmId);
@@ -634,7 +642,7 @@ private NicProfile prepareMocksAndRunPrepareNic(VirtualMachine.Type vmType, bool
Mockito.when(vpcVO.getIp4Dns1()).thenReturn(null);
Mockito.when(vpcVO.getIp6Dns1()).thenReturn(null);
}
- Mockito.when(testOrchastrator._vpcMgr.getActiveVpc(vpcId)).thenReturn(vpcVO);
+ Mockito.when(testOrchestrator._vpcMgr.getActiveVpc(vpcId)).thenReturn(vpcVO);
} else {
Mockito.when(routerVO.getVpcId()).thenReturn(null);
Long routerNetworkId = 2L;
@@ -648,13 +656,13 @@ private NicProfile prepareMocksAndRunPrepareNic(VirtualMachine.Type vmType, bool
Mockito.when(routerNetworkVO.getDns1()).thenReturn(null);
Mockito.when(routerNetworkVO.getIp6Dns1()).thenReturn(null);
}
- Mockito.when(testOrchastrator.routerNetworkDao.getRouterNetworks(vmId)).thenReturn(List.of(routerNetworkId));
- Mockito.when(testOrchastrator._networksDao.findById(routerNetworkId)).thenReturn(routerNetworkVO);
+ Mockito.when(testOrchestrator.routerNetworkDao.getRouterNetworks(vmId)).thenReturn(List.of(routerNetworkId));
+ Mockito.when(testOrchestrator._networksDao.findById(routerNetworkId)).thenReturn(routerNetworkVO);
}
- Mockito.when(testOrchastrator.routerDao.findById(vmId)).thenReturn(routerVO);
+ Mockito.when(testOrchestrator.routerDao.findById(vmId)).thenReturn(routerVO);
NicProfile profile = null;
try {
- profile = testOrchastrator.prepareNic(virtualMachineProfile, deployDestination, reservationContext, nicId, network);
+ profile = testOrchestrator.prepareNic(virtualMachineProfile, deployDestination, reservationContext, nicId, network);
} catch (InsufficientCapacityException | ResourceUnavailableException e) {
Assert.fail(String.format("Failure with exception %s", e.getMessage()));
}
@@ -723,7 +731,7 @@ public void testGetNetworkGatewayAndNetmaskForNicImportAdvancedZone() {
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced);
Mockito.when(network.getGateway()).thenReturn(networkGateway);
Mockito.when(network.getCidr()).thenReturn(networkCidr);
- Pair pair = testOrchastrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
+ Pair pair = testOrchestrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Assert.assertNotNull(pair);
Assert.assertEquals(networkGateway, pair.first());
Assert.assertEquals(networkNetmask, pair.second());
@@ -743,9 +751,9 @@ public void testGetNetworkGatewayAndNetmaskForNicImportBasicZone() {
Mockito.when(vlan.getVlanNetmask()).thenReturn(defaultNetworkNetmask);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic);
Mockito.when(ipAddressVO.getVlanId()).thenReturn(1L);
- Mockito.when(testOrchastrator._vlanDao.findById(1L)).thenReturn(vlan);
- Mockito.when(testOrchastrator._ipAddressDao.findByIp(ipAddress)).thenReturn(ipAddressVO);
- Pair pair = testOrchastrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
+ Mockito.when(testOrchestrator._vlanDao.findById(1L)).thenReturn(vlan);
+ Mockito.when(testOrchestrator._ipAddressDao.findByIp(ipAddress)).thenReturn(ipAddressVO);
+ Pair pair = testOrchestrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Assert.assertNotNull(pair);
Assert.assertEquals(defaultNetworkGateway, pair.first());
Assert.assertEquals(defaultNetworkNetmask, pair.second());
@@ -757,7 +765,7 @@ public void testGetGuestIpForNicImportL2Network() {
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.L2);
- Assert.assertNull(testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses));
+ Assert.assertNull(testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses));
}
@Test
@@ -769,8 +777,8 @@ public void testGetGuestIpForNicImportAdvancedZone() {
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced);
String ipAddress = "10.1.10.10";
Mockito.when(ipAddresses.getIp4Address()).thenReturn(ipAddress);
- Mockito.when(testOrchastrator._ipAddrMgr.acquireGuestIpAddress(network, ipAddress)).thenReturn(ipAddress);
- String guestIp = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
+ Mockito.when(testOrchestrator._ipAddrMgr.acquireGuestIpAddress(network, ipAddress)).thenReturn(ipAddress);
+ String guestIp = testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(ipAddress, guestIp);
}
@@ -791,8 +799,8 @@ public void testGetGuestIpForNicImportBasicZoneAutomaticIP() {
Mockito.when(ipAddressVO.getState()).thenReturn(State.Free);
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
- Mockito.when(testOrchastrator._ipAddressDao.findBySourceNetworkIdAndDatacenterIdAndState(networkId, dataCenterId, State.Free)).thenReturn(ipAddressVO);
- String ipAddress = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
+ Mockito.when(testOrchestrator._ipAddressDao.findBySourceNetworkIdAndDatacenterIdAndState(networkId, dataCenterId, State.Free)).thenReturn(ipAddressVO);
+ String ipAddress = testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(freeIp, ipAddress);
}
@@ -814,8 +822,8 @@ public void testGetGuestIpForNicImportBasicZoneManualIP() {
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(ipAddresses.getIp4Address()).thenReturn(requestedIp);
- Mockito.when(testOrchastrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
- String ipAddress = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
+ Mockito.when(testOrchestrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
+ String ipAddress = testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(requestedIp, ipAddress);
}
@@ -837,7 +845,168 @@ public void testGetGuestIpForNicImportBasicUsedIP() {
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(ipAddresses.getIp4Address()).thenReturn(requestedIp);
- Mockito.when(testOrchastrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
- testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
+ Mockito.when(testOrchestrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
+ testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
+ }
+
+ @Test
+ public void testShutdownNetworkAcquireLockFailed() {
+ ReservationContext reservationContext = Mockito.mock(ReservationContext.class);
+ NetworkVO network = mock(NetworkVO.class);
+ long networkId = 1;
+ when(testOrchestrator._networksDao.acquireInLockTable(Mockito.anyLong(), Mockito.anyInt())).thenReturn(null);
+
+ boolean shutdownNetworkStatus = testOrchestrator.shutdownNetwork(networkId, reservationContext, false);
+ Assert.assertFalse(shutdownNetworkStatus);
+
+ verify(testOrchestrator._networksDao, times(1)).acquireInLockTable(networkId, NetworkLockTimeout.value());
+ }
+
+ @Test
+ public void testShutdownNetworkInAllocatedState() {
+ ReservationContext reservationContext = Mockito.mock(ReservationContext.class);
+ NetworkVO network = mock(NetworkVO.class);
+ long networkId = 1;
+ when(testOrchestrator._networksDao.acquireInLockTable(Mockito.anyLong(), Mockito.anyInt())).thenReturn(network);
+ when(network.getId()).thenReturn(networkId);
+ when(network.getState()).thenReturn(Network.State.Allocated);
+
+ boolean shutdownNetworkStatus = testOrchestrator.shutdownNetwork(networkId, reservationContext, false);
+ Assert.assertTrue(shutdownNetworkStatus);
+
+ verify(network, times(1)).getState();
+ verify(testOrchestrator._networksDao, times(1)).acquireInLockTable(networkId, NetworkLockTimeout.value());
+ verify(testOrchestrator._networksDao, times(1)).releaseFromLockTable(networkId);
+ }
+
+ @Test
+ public void testShutdownNetworkInImplementingState() {
+ ReservationContext reservationContext = Mockito.mock(ReservationContext.class);
+ NetworkVO network = mock(NetworkVO.class);
+ long networkId = 1;
+ when(testOrchestrator._networksDao.acquireInLockTable(Mockito.anyLong(), Mockito.anyInt())).thenReturn(network);
+ when(network.getId()).thenReturn(networkId);
+ when(network.getState()).thenReturn(Network.State.Implementing);
+
+ boolean shutdownNetworkStatus = testOrchestrator.shutdownNetwork(networkId, reservationContext, false);
+ Assert.assertFalse(shutdownNetworkStatus);
+
+ verify(network, times(3)).getState();
+ verify(testOrchestrator._networksDao, times(1)).acquireInLockTable(networkId, NetworkLockTimeout.value());
+ verify(testOrchestrator._networksDao, times(1)).releaseFromLockTable(networkId);
+ }
+
+ @Test(expected = InsufficientVirtualNetworkCapacityException.class)
+ public void testImportNicAcquireGuestIPFailed() throws Exception {
+ DataCenter dataCenter = Mockito.mock(DataCenter.class);
+ VirtualMachine vm = mock(VirtualMachine.class);
+ Network network = Mockito.mock(Network.class);
+ Mockito.when(network.getGuestType()).thenReturn(GuestType.Isolated);
+ Mockito.when(network.getNetworkOfferingId()).thenReturn(networkOfferingId);
+ long dataCenterId = 1L;
+ Mockito.when(network.getDataCenterId()).thenReturn(dataCenterId);
+ Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
+ String ipAddress = "10.1.10.10";
+ Mockito.when(ipAddresses.getIp4Address()).thenReturn(ipAddress);
+ Mockito.when(testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses)).thenReturn(null);
+ Mockito.when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.Dns, Service.Dhcp));
+ String macAddress = "02:01:01:82:00:01";
+ int deviceId = 0;
+ testOrchestrator.importNic(macAddress, deviceId, network, true, vm, ipAddresses, dataCenter, false);
+ }
+
+ @Test(expected = InsufficientVirtualNetworkCapacityException.class)
+ public void testImportNicAutoAcquireGuestIPFailed() throws Exception {
+ DataCenter dataCenter = Mockito.mock(DataCenter.class);
+ VirtualMachine vm = mock(VirtualMachine.class);
+ Network network = Mockito.mock(Network.class);
+ Mockito.when(network.getGuestType()).thenReturn(GuestType.Isolated);
+ Mockito.when(network.getNetworkOfferingId()).thenReturn(networkOfferingId);
+ long dataCenterId = 1L;
+ Mockito.when(network.getDataCenterId()).thenReturn(dataCenterId);
+ Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
+ String ipAddress = "auto";
+ Mockito.when(ipAddresses.getIp4Address()).thenReturn(ipAddress);
+ Mockito.when(testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses)).thenReturn(null);
+ Mockito.when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.Dns, Service.Dhcp));
+ String macAddress = "02:01:01:82:00:01";
+ int deviceId = 0;
+ testOrchestrator.importNic(macAddress, deviceId, network, true, vm, ipAddresses, dataCenter, false);
+ }
+
+ @Test
+ public void testImportNicNoIP4Address() throws Exception {
+ DataCenter dataCenter = Mockito.mock(DataCenter.class);
+ Long vmId = 1L;
+ Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM;
+ VirtualMachine vm = mock(VirtualMachine.class);
+ Mockito.when(vm.getId()).thenReturn(vmId);
+ Mockito.when(vm.getHypervisorType()).thenReturn(hypervisorType);
+ Long networkId = 1L;
+ Network network = Mockito.mock(Network.class);
+ Mockito.when(network.getId()).thenReturn(networkId);
+ Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
+ Mockito.when(ipAddresses.getIp4Address()).thenReturn(null);
+ URI broadcastUri = URI.create("vlan://123");
+ NicVO nic = mock(NicVO.class);
+ Mockito.when(nic.getBroadcastUri()).thenReturn(broadcastUri);
+ String macAddress = "02:01:01:82:00:01";
+ int deviceId = 1;
+ Integer networkRate = 200;
+ Mockito.when(testOrchestrator._networkModel.getNetworkRate(networkId, vmId)).thenReturn(networkRate);
+ Mockito.when(testOrchestrator._networkModel.isSecurityGroupSupportedInNetwork(network)).thenReturn(false);
+ Mockito.when(testOrchestrator._networkModel.getNetworkTag(hypervisorType, network)).thenReturn("testtag");
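+ // Transaction.execute(TransactionCallback) is mocked statically below, so any Transaction.execute call inside importNic returns the prepared NicVO mock instead of running a real transaction.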
+ try (MockedStatic<Transaction> transactionMocked = Mockito.mockStatic(Transaction.class)) {
+ transactionMocked.when(() -> Transaction.execute(any(TransactionCallback.class))).thenReturn(nic);
+ Pair<NicProfile, Integer> nicProfileIntegerPair = testOrchestrator.importNic(macAddress, deviceId, network, true, vm, ipAddresses, dataCenter, false);
+ verify(testOrchestrator._networkModel, times(1)).getNetworkRate(networkId, vmId);
+ verify(testOrchestrator._networkModel, times(1)).isSecurityGroupSupportedInNetwork(network);
+ verify(testOrchestrator._networkModel, times(1)).getNetworkTag(Hypervisor.HypervisorType.KVM, network);
+ assertEquals(deviceId, nicProfileIntegerPair.second().intValue());
+ NicProfile nicProfile = nicProfileIntegerPair.first();
+ assertEquals(broadcastUri, nicProfile.getBroadCastUri());
+ assertEquals(networkRate, nicProfile.getNetworkRate());
+ assertFalse(nicProfile.isSecurityGroupEnabled());
+ assertEquals("testtag", nicProfile.getName());
+ }
+ }
+
+ @Test
+ public void testImportNicWithIP4Address() throws Exception {
+ DataCenter dataCenter = Mockito.mock(DataCenter.class);
+ Long vmId = 1L;
+ Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM;
+ VirtualMachine vm = mock(VirtualMachine.class);
+ Mockito.when(vm.getId()).thenReturn(vmId);
+ Mockito.when(vm.getHypervisorType()).thenReturn(hypervisorType);
+ Long networkId = 1L;
+ Network network = Mockito.mock(Network.class);
+ Mockito.when(network.getId()).thenReturn(networkId);
+ String ipAddress = "10.1.10.10";
+ Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
+ Mockito.when(ipAddresses.getIp4Address()).thenReturn(ipAddress);
+ URI broadcastUri = URI.create("vlan://123");
+ NicVO nic = mock(NicVO.class);
+ Mockito.when(nic.getBroadcastUri()).thenReturn(broadcastUri);
+ String macAddress = "02:01:01:82:00:01";
+ int deviceId = 1;
+ Integer networkRate = 200;
+ Mockito.when(testOrchestrator._networkModel.getNetworkRate(networkId, vmId)).thenReturn(networkRate);
+ Mockito.when(testOrchestrator._networkModel.isSecurityGroupSupportedInNetwork(network)).thenReturn(false);
+ Mockito.when(testOrchestrator._networkModel.getNetworkTag(hypervisorType, network)).thenReturn("testtag");
+ try (MockedStatic<Transaction> transactionMocked = Mockito.mockStatic(Transaction.class)) {
+ transactionMocked.when(() -> Transaction.execute(any(TransactionCallback.class))).thenReturn(nic);
+ Pair<NicProfile, Integer> nicProfileIntegerPair = testOrchestrator.importNic(macAddress, deviceId, network, true, vm, ipAddresses, dataCenter, false);
+ verify(testOrchestrator, times(1)).getSelectedIpForNicImport(network, dataCenter, ipAddresses);
+ verify(testOrchestrator._networkModel, times(1)).getNetworkRate(networkId, vmId);
+ verify(testOrchestrator._networkModel, times(1)).isSecurityGroupSupportedInNetwork(network);
+ verify(testOrchestrator._networkModel, times(1)).getNetworkTag(Hypervisor.HypervisorType.KVM, network);
+ assertEquals(deviceId, nicProfileIntegerPair.second().intValue());
+ NicProfile nicProfile = nicProfileIntegerPair.first();
+ assertEquals(broadcastUri, nicProfile.getBroadCastUri());
+ assertEquals(networkRate, nicProfile.getNetworkRate());
+ assertFalse(nicProfile.isSecurityGroupEnabled());
+ assertEquals("testtag", nicProfile.getName());
+ }
}
}
diff --git a/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java b/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java
index 132fd3fe5a23..cd62935f17ee 100644
--- a/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java
+++ b/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java
@@ -135,8 +135,8 @@ public Long getPodId() {
return podId;
}
- public void setPodId(long podId) {
- this.podId = new Long(podId);
+ public void setPodId(Long podId) {
+ this.podId = podId;
}
@Override
@@ -144,8 +144,8 @@ public Long getClusterId() {
return clusterId;
}
- public void setClusterId(long clusterId) {
- this.clusterId = new Long(clusterId);
+ public void setClusterId(Long clusterId) {
+ this.clusterId = clusterId;
}
@Override
diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java
index 51362cf885e0..6b53e49764e4 100644
--- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java
+++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDao.java
@@ -31,4 +31,6 @@ public interface DomainDetailsDao extends GenericDao {
void deleteDetails(long domainId);
void update(long domainId, Map details);
+
+ String getActualValue(DomainDetailVO domainDetailVO);
}
diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java
index dad3fe9ad1eb..50097d154f5f 100644
--- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java
@@ -24,6 +24,7 @@
import com.cloud.domain.DomainDetailVO;
import com.cloud.domain.DomainVO;
+import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
@@ -34,6 +35,7 @@
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
public class DomainDetailsDaoImpl extends GenericDaoBase implements DomainDetailsDao, ScopedConfigStorage {
protected final SearchBuilder domainSearch;
@@ -111,7 +113,7 @@ public String getConfigValue(long id, ConfigKey> key) {
String enableDomainSettingsForChildDomain = _configDao.getValue("enable.domain.settings.for.child.domain");
if (!Boolean.parseBoolean(enableDomainSettingsForChildDomain)) {
vo = findDetail(id, key.key());
- return vo == null ? null : vo.getValue();
+ return vo == null ? null : getActualValue(vo);
}
DomainVO domain = _domainDao.findById(id);
// if value is not configured in domain then check its parent domain till ROOT
@@ -125,6 +127,15 @@ public String getConfigValue(long id, ConfigKey> key) {
break;
}
}
- return vo == null ? null : vo.getValue();
+ return vo == null ? null : getActualValue(vo);
+ }
+
+ @Override
+ public String getActualValue(DomainDetailVO domainDetailVO) {
+ ConfigurationVO configurationVO = _configDao.findByName(domainDetailVO.getName());
+ if (configurationVO != null && configurationVO.isEncrypted()) {
+ return DBEncryptionUtil.decrypt(domainDetailVO.getValue());
+ }
+ return domainDetailVO.getValue();
}
}
diff --git a/engine/schema/src/main/java/com/cloud/host/HostTagVO.java b/engine/schema/src/main/java/com/cloud/host/HostTagVO.java
index cd4ac29738d5..98071a2c0732 100644
--- a/engine/schema/src/main/java/com/cloud/host/HostTagVO.java
+++ b/engine/schema/src/main/java/com/cloud/host/HostTagVO.java
@@ -40,6 +40,9 @@ public class HostTagVO implements InternalIdentity {
@Column(name = "tag")
private String tag;
+ @Column(name = "is_implicit")
+ private boolean isImplicit = false;
+
@Column(name = "is_tag_a_rule")
private boolean isTagARule;
@@ -74,6 +77,13 @@ public boolean getIsTagARule() {
return isTagARule;
}
+ public void setIsImplicit(boolean isImplicit) {
+ this.isImplicit = isImplicit;
+ }
+
+ public boolean getIsImplicit() {
+ return isImplicit;
+ }
@Override
public long getId() {
diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java
index 3e64d20d0e2d..1a507da79570 100644
--- a/engine/schema/src/main/java/com/cloud/host/HostVO.java
+++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java
@@ -16,13 +16,13 @@
// under the License.
package com.cloud.host;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.UUID;
import javax.persistence.Column;
@@ -45,6 +45,7 @@
import org.apache.cloudstack.util.HypervisorTypeConverter;
import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
@@ -768,27 +769,48 @@ public void setUuid(String uuid) {
this.uuid = uuid;
}
- public boolean checkHostServiceOfferingAndTemplateTags(ServiceOffering serviceOffering, VirtualMachineTemplate template) {
- if (serviceOffering == null || template == null) {
- return false;
- }
+ private Set<String> getHostServiceOfferingAndTemplateStrictTags(ServiceOffering serviceOffering, VirtualMachineTemplate template, Set<String> strictHostTags) {
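+ // Gathers the offering host tag(s) and the template tag, keeps only those present in strictHostTags, and strips the ones this host already carries; an empty result means no strict tag is missing.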
if (StringUtils.isEmpty(serviceOffering.getHostTag()) && StringUtils.isEmpty(template.getTemplateTag())) {
- return true;
+ return new HashSet<>();
}
- if (getHostTags() == null) {
- return false;
- }
- HashSet<String> hostTagsSet = new HashSet<>(getHostTags());
- List<String> tags = new ArrayList<>();
+ List<String> hostTagsList = getHostTags();
+ HashSet<String> hostTagsSet = CollectionUtils.isNotEmpty(hostTagsList) ? new HashSet<>(hostTagsList) : new HashSet<>();
+ HashSet<String> tags = new HashSet<>();
if (StringUtils.isNotEmpty(serviceOffering.getHostTag())) {
tags.addAll(Arrays.asList(serviceOffering.getHostTag().split(",")));
}
- if (StringUtils.isNotEmpty(template.getTemplateTag()) && !tags.contains(template.getTemplateTag())) {
+ if (StringUtils.isNotEmpty(template.getTemplateTag())) {
tags.add(template.getTemplateTag());
}
+ tags.removeIf(tag -> !strictHostTags.contains(tag));
+ tags.removeAll(hostTagsSet);
+ return tags;
+ }
+
+ public boolean checkHostServiceOfferingAndTemplateTags(ServiceOffering serviceOffering, VirtualMachineTemplate template, Set<String> strictHostTags) {
+ if (serviceOffering == null || template == null) {
+ return false;
+ }
+ Set<String> tags = getHostServiceOfferingAndTemplateStrictTags(serviceOffering, template, strictHostTags);
+ if (tags.isEmpty()) {
+ return true;
+ }
+ List<String> hostTagsList = getHostTags();
+ HashSet<String> hostTagsSet = CollectionUtils.isNotEmpty(hostTagsList) ? new HashSet<>(hostTagsList) : new HashSet<>();
return hostTagsSet.containsAll(tags);
}
+ public Set<String> getHostServiceOfferingAndTemplateMissingTags(ServiceOffering serviceOffering, VirtualMachineTemplate template, Set<String> strictHostTags) {
+ Set<String> tags = getHostServiceOfferingAndTemplateStrictTags(serviceOffering, template, strictHostTags);
+ if (tags.isEmpty()) {
+ return new HashSet<>();
+ }
+ List<String> hostTagsList = getHostTags();
+ HashSet<String> hostTagsSet = CollectionUtils.isNotEmpty(hostTagsList) ? new HashSet<>(hostTagsList) : new HashSet<>();
+ tags.removeAll(hostTagsSet);
+ return tags;
+ }
+
public boolean checkHostServiceOfferingTags(ServiceOffering serviceOffering) {
if (serviceOffering == null) {
return false;
diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java
index ca180e2323fd..08380ed8b405 100644
--- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java
+++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java
@@ -141,6 +141,8 @@ public interface HostDao extends GenericDao, StateDao listByHostCapability(Host.Type type, Long clusterId, Long podId, long dcId, String hostCapabilty);
+ List<HostVO> listByClusterHypervisorTypeAndHostCapability(Long clusterId, HypervisorType hypervisorType, String hostCapabilty);
+
List listByClusterAndHypervisorType(long clusterId, HypervisorType hypervisorType);
HostVO findByName(String name);
diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java
index 5faa877b458f..170c6a45fc3a 100644
--- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java
@@ -341,6 +341,7 @@ public void init() {
ClusterHypervisorSearch.and("hypervisor", ClusterHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
ClusterHypervisorSearch.and("type", ClusterHypervisorSearch.entity().getType(), SearchCriteria.Op.EQ);
ClusterHypervisorSearch.and("status", ClusterHypervisorSearch.entity().getStatus(), SearchCriteria.Op.EQ);
+ ClusterHypervisorSearch.and("resourceState", ClusterHypervisorSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
ClusterHypervisorSearch.done();
UnmanagedDirectConnectSearch = createSearchBuilder();
@@ -1506,12 +1507,42 @@ public List listByHostCapability(Type type, Long clusterId, Long podId,
return listBy(sc);
}
+ @Override
+ public List<HostVO> listByClusterHypervisorTypeAndHostCapability(Long clusterId, HypervisorType hypervisorType, String hostCapabilty) {
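+ // Inner-joins host with host_details so only Up, Enabled routing hosts in the cluster whose capability detail is stored as "true" are returned.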
+ SearchBuilder<DetailVO> hostCapabilitySearch = _detailsDao.createSearchBuilder();
+ DetailVO tagEntity = hostCapabilitySearch.entity();
+ hostCapabilitySearch.and("capability", tagEntity.getName(), SearchCriteria.Op.EQ);
+ hostCapabilitySearch.and("value", tagEntity.getValue(), SearchCriteria.Op.EQ);
+
+ SearchBuilder<HostVO> hostSearch = createSearchBuilder();
+ HostVO entity = hostSearch.entity();
+ hostSearch.and("clusterId", entity.getClusterId(), SearchCriteria.Op.EQ);
+ hostSearch.and("hypervisor", entity.getHypervisorType(), SearchCriteria.Op.EQ);
+ hostSearch.and("type", entity.getType(), SearchCriteria.Op.EQ);
+ hostSearch.and("status", entity.getStatus(), SearchCriteria.Op.EQ);
+ hostSearch.and("resourceState", entity.getResourceState(), SearchCriteria.Op.EQ);
+ hostSearch.join("hostCapabilitySearch", hostCapabilitySearch, entity.getId(), tagEntity.getHostId(), JoinBuilder.JoinType.INNER);
+
+ SearchCriteria<HostVO> sc = hostSearch.create();
+ sc.setJoinParameters("hostCapabilitySearch", "value", Boolean.toString(true));
+ sc.setJoinParameters("hostCapabilitySearch", "capability", hostCapabilty);
+
+ sc.setParameters("clusterId", clusterId);
+ sc.setParameters("hypervisor", hypervisorType);
+ sc.setParameters("type", Type.Routing);
+ sc.setParameters("status", Status.Up);
+ sc.setParameters("resourceState", ResourceState.Enabled);
+ return listBy(sc);
+ }
+
+ @Override
public List listByClusterAndHypervisorType(long clusterId, HypervisorType hypervisorType) {
SearchCriteria sc = ClusterHypervisorSearch.create();
sc.setParameters("clusterId", clusterId);
sc.setParameters("hypervisor", hypervisorType);
sc.setParameters("type", Type.Routing);
sc.setParameters("status", Status.Up);
+ sc.setParameters("resourceState", ResourceState.Enabled);
return listBy(sc);
}
diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDao.java
index d134db334035..7a00829fd44e 100644
--- a/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDao.java
+++ b/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDao.java
@@ -20,6 +20,7 @@
import com.cloud.host.HostTagVO;
import com.cloud.utils.db.GenericDao;
+import org.apache.cloudstack.api.response.HostTagResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
public interface HostTagsDao extends GenericDao {
@@ -35,6 +36,13 @@ public interface HostTagsDao extends GenericDao {
void deleteTags(long hostId);
+ boolean updateImplicitTags(long hostId, List<String> hostTags);
+
+ List<HostTagVO> getExplicitHostTags(long hostId);
+
List<HostTagVO> findHostRuleTags();
+ HostTagResponse newHostTagResponse(HostTagVO hostTag);
+
+ List<HostTagVO> searchByIds(Long... hostTagIds);
}
diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDaoImpl.java
index 65deb1d1c9b0..4aa14a31cfcf 100644
--- a/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/host/dao/HostTagsDaoImpl.java
@@ -16,10 +16,14 @@
// under the License.
package com.cloud.host.dao;
+import java.util.ArrayList;
import java.util.List;
+import org.apache.cloudstack.api.response.HostTagResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Component;
import com.cloud.host.HostTagVO;
@@ -30,14 +34,23 @@
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.SearchCriteria.Func;
+import javax.inject.Inject;
+
@Component
public class HostTagsDaoImpl extends GenericDaoBase implements HostTagsDao, Configurable {
protected final SearchBuilder HostSearch;
protected final GenericSearchBuilder DistinctImplictTagsSearch;
+ private final SearchBuilder<HostTagVO> stSearch;
+ private final SearchBuilder<HostTagVO> tagIdsearch;
+ private final SearchBuilder<HostTagVO> ImplicitTagsSearch;
+
+ @Inject
+ private ConfigurationDao _configDao;
public HostTagsDaoImpl() {
HostSearch = createSearchBuilder();
HostSearch.and("hostId", HostSearch.entity().getHostId(), SearchCriteria.Op.EQ);
+ HostSearch.and("isImplicit", HostSearch.entity().getIsImplicit(), SearchCriteria.Op.EQ);
HostSearch.and("isTagARule", HostSearch.entity().getIsTagARule(), SearchCriteria.Op.EQ);
HostSearch.done();
@@ -46,6 +59,19 @@ public HostTagsDaoImpl() {
DistinctImplictTagsSearch.and("hostIds", DistinctImplictTagsSearch.entity().getHostId(), SearchCriteria.Op.IN);
DistinctImplictTagsSearch.and("implicitTags", DistinctImplictTagsSearch.entity().getTag(), SearchCriteria.Op.IN);
DistinctImplictTagsSearch.done();
+
+ stSearch = createSearchBuilder();
+ stSearch.and("idIN", stSearch.entity().getId(), SearchCriteria.Op.IN);
+ stSearch.done();
+
+ tagIdsearch = createSearchBuilder();
+ tagIdsearch.and("id", tagIdsearch.entity().getId(), SearchCriteria.Op.EQ);
+ tagIdsearch.done();
+
+ ImplicitTagsSearch = createSearchBuilder();
+ ImplicitTagsSearch.and("hostId", ImplicitTagsSearch.entity().getHostId(), SearchCriteria.Op.EQ);
+ ImplicitTagsSearch.and("isImplicit", ImplicitTagsSearch.entity().getIsImplicit(), SearchCriteria.Op.EQ);
+ ImplicitTagsSearch.done();
}
@Override
@@ -74,6 +100,36 @@ public void deleteTags(long hostId) {
txn.commit();
}
+ @Override
+ public boolean updateImplicitTags(long hostId, List<String> hostTags) {
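+ // Inside one transaction: expunge the host's existing implicit tags, then persist the non-blank entries from hostTags flagged as implicit; returns true if anything was removed or added.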
+ TransactionLegacy txn = TransactionLegacy.currentTxn();
+ txn.start();
+ SearchCriteria<HostTagVO> sc = ImplicitTagsSearch.create();
+ sc.setParameters("hostId", hostId);
+ sc.setParameters("isImplicit", true);
+ boolean expunged = expunge(sc) > 0;
+ boolean persisted = false;
+ for (String tag : hostTags) {
+ if (StringUtils.isNotBlank(tag)) {
+ HostTagVO vo = new HostTagVO(hostId, tag.trim());
+ vo.setIsImplicit(true);
+ persist(vo);
+ persisted = true;
+ }
+ }
+ txn.commit();
+ return expunged || persisted;
+ }
+
+ @Override
+ public List<HostTagVO> getExplicitHostTags(long hostId) {
+ SearchCriteria<HostTagVO> sc = ImplicitTagsSearch.create();
+ sc.setParameters("hostId", hostId);
+ sc.setParameters("isImplicit", false);
+
+ return search(sc, null);
+ }
+
@Override
public List findHostRuleTags() {
SearchCriteria sc = HostSearch.create();
@@ -89,6 +145,7 @@ public void persist(long hostId, List hostTags, Boolean isTagARule) {
txn.start();
SearchCriteria sc = HostSearch.create();
sc.setParameters("hostId", hostId);
+ sc.setParameters("isImplicit", false);
expunge(sc);
for (String tag : hostTags) {
@@ -110,4 +167,72 @@ public ConfigKey>[] getConfigKeys() {
public String getConfigComponentName() {
return HostTagsDaoImpl.class.getSimpleName();
}
+
+ @Override
+ public HostTagResponse newHostTagResponse(HostTagVO tag) {
+ HostTagResponse tagResponse = new HostTagResponse();
+
+ tagResponse.setName(tag.getTag());
+ tagResponse.setHostId(tag.getHostId());
+ tagResponse.setImplicit(tag.getIsImplicit());
+
+ tagResponse.setObjectName("hosttag");
+
+ return tagResponse;
+ }
+
+ @Override
+ public List<HostTagVO> searchByIds(Long... tagIds) {
+ String batchCfg = _configDao.getValue("detail.batch.query.size");
+
+ final int detailsBatchSize = batchCfg != null ? Integer.parseInt(batchCfg) : 2000;
+
+ // query details by batches
+ List<HostTagVO> tagList = new ArrayList<>();
+ int curr_index = 0;
+
+ if (tagIds.length > detailsBatchSize) {
+ while ((curr_index + detailsBatchSize) <= tagIds.length) {
+ Long[] ids = new Long[detailsBatchSize];
+
+ for (int k = 0, j = curr_index; j < curr_index + detailsBatchSize; j++, k++) {
+ ids[k] = tagIds[j];
+ }
+
+ SearchCriteria<HostTagVO> sc = stSearch.create();
+
+ sc.setParameters("idIN", (Object[])ids);
+
+ List<HostTagVO> vms = searchIncludingRemoved(sc, null, null, false);
+
+ if (vms != null) {
+ tagList.addAll(vms);
+ }
+
+ curr_index += detailsBatchSize;
+ }
+ }
+
+ if (curr_index < tagIds.length) {
+ int batch_size = (tagIds.length - curr_index);
+ // set the ids value
+ Long[] ids = new Long[batch_size];
+
+ for (int k = 0, j = curr_index; j < curr_index + batch_size; j++, k++) {
+ ids[k] = tagIds[j];
+ }
+
+ SearchCriteria<HostTagVO> sc = stSearch.create();
+
+ sc.setParameters("idIN", (Object[])ids);
+
+ List<HostTagVO> tags = searchIncludingRemoved(sc, null, null, false);
+
+ if (tags != null) {
+ tagList.addAll(tags);
+ }
+ }
+
+ return tagList;
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java b/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
index 4455c7491ddb..a3b03280fdf6 100644
--- a/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
+++ b/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
@@ -80,6 +80,18 @@ public HypervisorCapabilitiesVO(HypervisorType hypervisorType, String hypervisor
this.uuid = UUID.randomUUID().toString();
}
+ public HypervisorCapabilitiesVO(HypervisorCapabilitiesVO source) {
+ this.hypervisorType = source.getHypervisorType();
+ this.hypervisorVersion = source.getHypervisorVersion();
+ this.maxGuestsLimit = source.getMaxGuestsLimit();
+ this.maxDataVolumesLimit = source.getMaxDataVolumesLimit();
+ this.maxHostsPerCluster = source.getMaxHostsPerCluster();
+ this.securityGroupEnabled = source.isSecurityGroupEnabled();
+ this.storageMotionSupported = source.isStorageMotionSupported();
+ this.vmSnapshotEnabled = source.isVmSnapshotEnabled();
+ this.uuid = UUID.randomUUID().toString();
+ }
+
/**
* @param hypervisorType the hypervisorType to set
*/
diff --git a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java
index 4b25c63403e2..718511746c2f 100644
--- a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java
+++ b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java
@@ -35,4 +35,6 @@ public interface AutoScaleVmGroupVmMapDao extends GenericDao vmIds, Long batchSize);
}
diff --git a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java
index 8fca4c26f9a7..1ae55d97da2c 100644
--- a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java
@@ -18,7 +18,10 @@
import java.util.List;
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.network.as.AutoScaleVmGroupVmMapVO;
@@ -31,9 +34,6 @@
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.VMInstanceDao;
-import javax.annotation.PostConstruct;
-import javax.inject.Inject;
-
@Component
public class AutoScaleVmGroupVmMapDaoImpl extends GenericDaoBase implements AutoScaleVmGroupVmMapDao {
@@ -115,4 +115,16 @@ public boolean removeByGroup(long vmGroupId) {
sc.setParameters("vmGroupId", vmGroupId);
return remove(sc) >= 0;
}
+
+ @Override
+ public int expungeByVmList(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return 0;
+ }
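+ // Build an ad-hoc IN-clause search on the instance id and expunge the matching rows, honouring the batchSize limit via batchExpunge.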
+ SearchBuilder<AutoScaleVmGroupVmMapVO> sb = createSearchBuilder();
+ sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN);
+ SearchCriteria<AutoScaleVmGroupVmMapVO> sc = sb.create();
+ sc.setParameters("vmIds", vmIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java
index b1b1e1cf7571..3f8c36ac94ed 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java
@@ -105,4 +105,6 @@ public interface IPAddressDao extends GenericDao {
void buildQuarantineSearchCriteria(SearchCriteria sc);
IPAddressVO findBySourceNetworkIdAndDatacenterIdAndState(long sourceNetworkId, long dataCenterId, State state);
+
+ int expungeByVmList(List<Long> vmIds, Long batchSize);
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java
index ca779f7e9cee..aa143838c343 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java
@@ -26,6 +26,7 @@
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao;
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.dc.Vlan.VlanType;
@@ -561,4 +562,16 @@ public IPAddressVO findBySourceNetworkIdAndDatacenterIdAndState(long sourceNetwo
sc.setParameters("state", State.Free);
return findOneBy(sc);
}
+
+ @Override
+ public int expungeByVmList(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return 0;
+ }
+ SearchBuilder<IPAddressVO> sb = createSearchBuilder();
+ sb.and("vmIds", sb.entity().getAssociatedWithVmId(), SearchCriteria.Op.IN);
+ SearchCriteria<IPAddressVO> sc = sb.create();
+ sc.setParameters("vmIds", vmIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java
index ac3845beffe4..b1831b407a41 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java
@@ -16,10 +16,14 @@
// under the License.
package com.cloud.network.dao;
+import java.util.List;
+
import com.cloud.utils.db.GenericDao;
public interface InlineLoadBalancerNicMapDao extends GenericDao {
InlineLoadBalancerNicMapVO findByPublicIpAddress(String publicIpAddress);
InlineLoadBalancerNicMapVO findByNicId(long nicId);
+ int expungeByNicList(List<Long> nicIds, Long batchSize);
+
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java
index 1c3f231f9c1d..d64ba8b4155f 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java
@@ -17,9 +17,13 @@
package com.cloud.network.dao;
+import java.util.List;
+
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Component
@@ -41,4 +45,15 @@ public InlineLoadBalancerNicMapVO findByNicId(long nicId) {
return findOneBy(sc);
}
+ @Override
+ public int expungeByNicList(List<Long> nicIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(nicIds)) {
+ return 0;
+ }
+ SearchBuilder<InlineLoadBalancerNicMapVO> sb = createSearchBuilder();
+ sb.and("nicIds", sb.entity().getNicId(), SearchCriteria.Op.IN);
+ SearchCriteria<InlineLoadBalancerNicMapVO> sc = sb.create();
+ sc.setParameters("nicIds", nicIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java
index a25534b7010f..be2941d5cb2f 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java
@@ -42,4 +42,5 @@ public interface LoadBalancerVMMapDao extends GenericDao vmIds, Long batchSize);
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java
index b32320a84cb0..dc37cdeefe3d 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java
@@ -18,11 +18,12 @@
import java.util.List;
-
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
+import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
@@ -135,4 +136,16 @@ public List listByLoadBalancerIdAndVmId(long loadBalancerId
sc.addAnd("instanceId", SearchCriteria.Op.EQ, instanceId);
return listBy(sc);
}
+
+ @Override
+ public int expungeByVmList(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return 0;
+ }
+ SearchBuilder<LoadBalancerVMMapVO> sb = createSearchBuilder();
+ sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN);
+ SearchCriteria<LoadBalancerVMMapVO> sc = sb.create();
+ sc.setParameters("vmIds", vmIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java
index ebc0f1af2271..0516e26e13a7 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java
@@ -18,8 +18,12 @@
package com.cloud.network.dao;
+import java.util.List;
+
import com.cloud.utils.db.GenericDao;
public interface OpRouterMonitorServiceDao extends GenericDao {
+ int expungeByVmList(List<Long> vmIds, Long batchSize);
+
}
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java
index 451320ac9b6c..a8e818cfb189 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java
@@ -17,10 +17,27 @@
package com.cloud.network.dao;
-import com.cloud.utils.db.GenericDaoBase;
+import java.util.List;
+
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
+import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
+
@Component
public class OpRouterMonitorServiceDaoImpl extends GenericDaoBase implements OpRouterMonitorServiceDao {
+ @Override
+ public int expungeByVmList(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return 0;
+ }
+ SearchBuilder<OpRouterMonitorServiceVO> sb = createSearchBuilder();
+ sb.and("vmIds", sb.entity().getId(), SearchCriteria.Op.IN);
+ SearchCriteria<OpRouterMonitorServiceVO> sc = sb.create();
+ sc.setParameters("vmIds", vmIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java
index b89d04ad15a0..8cd114b7fc4f 100644
--- a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java
+++ b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java
@@ -47,4 +47,5 @@ public interface PortForwardingRulesDao extends GenericDao listByNetworkAndDestIpAddr(String ip4Address, long networkId);
+ int expungeByVmList(List<Long> vmIds, Long batchSize);
}
diff --git a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java
index 29cba516d720..3a404b3f2df3 100644
--- a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java
@@ -20,6 +20,7 @@
import javax.inject.Inject;
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.network.dao.FirewallRulesCidrsDao;
@@ -170,4 +171,16 @@ public PortForwardingRuleVO findByIdAndIp(long id, String secondaryIp) {
sc.setParameters("dstIp", secondaryIp);
return findOneBy(sc);
}
+
+ @Override
+ public int expungeByVmList(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return 0;
+ }
+ SearchBuilder<PortForwardingRuleVO> sb = createSearchBuilder();
+ sb.and("vmIds", sb.entity().getVirtualMachineId(), SearchCriteria.Op.IN);
+ SearchCriteria<PortForwardingRuleVO> sc = sb.create();
+ sc.setParameters("vmIds", vmIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java
index 98fc8c8687b8..5023aaa3794c 100644
--- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java
+++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java
@@ -17,10 +17,12 @@
package com.cloud.secstorage;
import java.util.Date;
+import java.util.List;
import com.cloud.utils.db.GenericDao;
public interface CommandExecLogDao extends GenericDao {
public void expungeExpiredRecords(Date cutTime);
public Integer getCopyCmdCountForSSVM(Long id);
+ int expungeByVmList(List<Long> vmIds, Long batchSize);
}
diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java
index f89a1bbf4ccb..a37acdf60298 100644
--- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java
@@ -19,6 +19,7 @@
import java.util.Date;
import java.util.List;
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.GenericDaoBase;
@@ -57,4 +58,16 @@ public Integer getCopyCmdCountForSSVM(Long id) {
List copyCmds = customSearch(sc, null);
return copyCmds.size();
}
+
+ @Override
+ public int expungeByVmList(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return 0;
+ }
+ SearchBuilder<CommandExecLogVO> sb = createSearchBuilder();
+ sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN);
+ SearchCriteria<CommandExecLogVO> sc = sb.create();
+ sc.setParameters("vmIds", vmIds.toArray());
+ return batchExpunge(sc, batchSize);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java
index d086ad1dac1f..48e63d8e2b55 100644
--- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java
+++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java
@@ -54,7 +54,7 @@ List createSystemServiceOfferings(String name, String uniqueN
List listPublicByCpuAndMemory(Integer cpus, Integer memory);
- ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId);
-
List listByHostTag(String tag);
+
+ ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved);
}
diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java
index 34ac7c47521d..706dcdc1b7b5 100644
--- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java
@@ -282,10 +282,10 @@ public List listPublicByCpuAndMemory(Integer cpus, Integer me
}
@Override
- public ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId) {
+ public ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved) {
SearchCriteria sc = SearchComputeOfferingByComputeOnlyDiskOffering.create();
sc.setParameters("disk_offering_id", diskOfferingId);
- List<ServiceOfferingVO> vos = listBy(sc);
+ List<ServiceOfferingVO> vos = includingRemoved ? listIncludingRemovedBy(sc) : listBy(sc);
if (vos.size() == 0) {
return null;
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java
index 998d0bbd724c..171634fb1044 100755
--- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java
@@ -57,4 +57,5 @@ public interface SnapshotDao extends GenericDao, StateDao listByIds(Object... ids);
+ List<SnapshotVO> searchByVolumes(List<Long> volumeIds);
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java
index 030d10d66827..f5fc9c47d036 100755
--- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java
@@ -18,11 +18,13 @@
import java.sql.PreparedStatement;
import java.sql.ResultSet;
+import java.util.ArrayList;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
import com.cloud.server.ResourceTag.ResourceObjectType;
@@ -285,4 +287,16 @@ public List listByStatusNotIn(long volumeId, Snapshot.State... statu
sc.setParameters("status", (Object[]) status);
return listBy(sc, null);
}
+
+ @Override
+ public List<SnapshotVO> searchByVolumes(List<Long> volumeIds) {
+ if (CollectionUtils.isEmpty(volumeIds)) {
+ return new ArrayList<>();
+ }
+ SearchBuilder<SnapshotVO> sb = createSearchBuilder();
+ sb.and("volumeIds", sb.entity().getVolumeId(), SearchCriteria.Op.IN);
+ SearchCriteria<SnapshotVO> sc = sb.create();
+ sc.setParameters("volumeIds", volumeIds.toArray());
+ return search(sc, null);
+ }
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java
index 43bb5b3d4d53..02a0355d92dd 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java
@@ -18,9 +18,12 @@
*/
package com.cloud.storage.dao;
+import java.util.List;
+
import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
import com.cloud.utils.db.GenericDao;
public interface SnapshotDetailsDao extends GenericDao, ResourceDetailsDao {
+ public List<SnapshotDetailsVO> findDetailsByZoneAndKey(long dcId, String key);
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java
index e4ae22cd021a..584a24817264 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java
@@ -18,11 +18,44 @@
*/
package com.cloud.storage.dao;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
+import com.cloud.utils.db.TransactionLegacy;
+import com.cloud.utils.exception.CloudRuntimeException;
+
public class SnapshotDetailsDaoImpl extends ResourceDetailsDaoBase implements SnapshotDetailsDao {
+ private static final String GET_SNAPSHOT_DETAILS_ON_ZONE = "SELECT s.* FROM snapshot_details s LEFT JOIN snapshots ss ON ss.id=s.snapshot_id WHERE ss.data_center_id = ? AND s.name = ?";
+
@Override
public void addDetail(long resourceId, String key, String value, boolean display) {
super.addDetail(new SnapshotDetailsVO(resourceId, key, value, display));
}
+
+ public List<SnapshotDetailsVO> findDetailsByZoneAndKey(long dcId, String key) {
+ StringBuilder sql = new StringBuilder(GET_SNAPSHOT_DETAILS_ON_ZONE);
+ TransactionLegacy txn = TransactionLegacy.currentTxn();
+ List<SnapshotDetailsVO> snapshotDetailsOnZone = new ArrayList<>();
+ try (PreparedStatement pstmt = txn.prepareStatement(sql.toString());) {
+ if (pstmt != null) {
+ pstmt.setLong(1, dcId);
+ pstmt.setString(2, key);
+ try (ResultSet rs = pstmt.executeQuery();) {
+ while (rs.next()) {
+ snapshotDetailsOnZone.add(toEntityBean(rs, false));
+ }
+ } catch (SQLException e) {
+ throw new CloudRuntimeException("Could not find details by given zone and key due to:" + e.getMessage(), e);
+ }
+ }
+ return snapshotDetailsOnZone;
+ } catch (SQLException e) {
+ throw new CloudRuntimeException("Could not find details by given zone and key due to:" + e.getMessage(), e);
+ }
+ }
}
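A short sketch of how the new findDetailsByZoneAndKey() lookup might be called, assuming an injected SnapshotDetailsDao; the surrounding class is hypothetical, while the DAO method and SnapshotDetailsVO come from the patch:

    import java.util.List;

    import javax.inject.Inject;

    import com.cloud.storage.dao.SnapshotDetailsDao;
    import com.cloud.storage.dao.SnapshotDetailsVO;

    // Hypothetical caller: lists snapshot detail rows with a given key for all snapshots in a zone.
    public class ZoneSnapshotDetailLookup {

        @Inject
        private SnapshotDetailsDao snapshotDetailsDao;

        public List<SnapshotDetailsVO> listDetailsInZone(long zoneId, String detailKey) {
            // Delegates to the JOIN-based query added in SnapshotDetailsDaoImpl.
            return snapshotDetailsDao.findDetailsByZoneAndKey(zoneId, detailKey);
        }
    }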
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java
index b099a6d6bdbb..62ef5b7570d1 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java
@@ -41,4 +41,6 @@ public interface StoragePoolHostDao extends GenericDao<StoragePoolHostVO, Long> {
public void deleteStoragePoolHostDetails(long hostId, long poolId);
List<StoragePoolHostVO> listByHostId(long hostId);
+
+ Pair<List<StoragePoolHostVO>, Integer> listByPoolIdNotInCluster(long clusterId, long poolId);
}
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java
index 9e7bdca11817..987a42f410e7 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java
@@ -23,12 +23,18 @@
import java.util.List;
import java.util.stream.Collectors;
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+
import org.springframework.stereotype.Component;
+import com.cloud.host.HostVO;
import com.cloud.host.Status;
+import com.cloud.host.dao.HostDao;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.utils.Pair;
import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
@@ -40,6 +46,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Long> implements StoragePoolHostDao {
protected final SearchBuilder<StoragePoolHostVO> HostSearch;
protected final SearchBuilder<StoragePoolHostVO> PoolHostSearch;
+ protected SearchBuilder<StoragePoolHostVO> poolNotInClusterSearch;
+
+ @Inject
+ HostDao hostDao;
+
protected static final String HOST_FOR_POOL_SEARCH = "SELECT * FROM storage_pool_host_ref ph, host h where ph.host_id = h.id and ph.pool_id=? and h.status=? ";
protected static final String HOSTS_FOR_POOLS_SEARCH = "SELECT DISTINCT(ph.host_id) FROM storage_pool_host_ref ph, host h WHERE ph.host_id = h.id AND h.status = 'Up' AND resource_state = 'Enabled' AND ph.pool_id IN (?)";
@@ -68,6 +79,15 @@ public StoragePoolHostDaoImpl() {
}
+ @PostConstruct
+ public void init(){
+ poolNotInClusterSearch = createSearchBuilder();
+ poolNotInClusterSearch.and("poolId", poolNotInClusterSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
+ SearchBuilder<HostVO> hostSearch = hostDao.createSearchBuilder();
+ poolNotInClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), poolNotInClusterSearch.entity().getHostId(), JoinBuilder.JoinType.INNER);
+ hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.NEQ);
+ }
+
@Override
public List<StoragePoolHostVO> listByPoolId(long id) {
SearchCriteria<StoragePoolHostVO> sc = PoolSearch.create();
@@ -194,4 +214,12 @@ public void deleteStoragePoolHostDetails(long hostId, long poolId) {
remove(sc);
txn.commit();
}
+
+ @Override
+ public Pair<List<StoragePoolHostVO>, Integer> listByPoolIdNotInCluster(long clusterId, long poolId) {
+ SearchCriteria<StoragePoolHostVO> sc = poolNotInClusterSearch.create();
+ sc.setParameters("poolId", poolId);
+ sc.setJoinParameters("hostSearch", "clusterId", clusterId);
+ return searchAndCount(sc, null);
+ }
}
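A small sketch of the new listByPoolIdNotInCluster() search, assuming an injected StoragePoolHostDao; the PoolHostInspector class is hypothetical, while StoragePoolHostVO and Pair are the types used in the patch:

    import java.util.List;

    import javax.inject.Inject;

    import com.cloud.storage.StoragePoolHostVO;
    import com.cloud.storage.dao.StoragePoolHostDao;
    import com.cloud.utils.Pair;

    // Hypothetical caller: counts hosts outside a cluster that still reference a pool.
    public class PoolHostInspector {

        @Inject
        private StoragePoolHostDao storagePoolHostDao;

        public int countHostsOutsideCluster(long clusterId, long poolId) {
            Pair<List<StoragePoolHostVO>, Integer> refs =
                    storagePoolHostDao.listByPoolIdNotInCluster(clusterId, poolId);
            // second() carries the total row count returned by searchAndCount().
            return refs.second();
        }
    }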
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java
index 4e9c63699ca9..e6ffca06f9e0 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java
@@ -112,7 +112,8 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.State, Volume.Event, Volume> {
virtualRouters);
@@ -158,4 +159,7 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.State, Volume.Event, Volume> {
List<VolumeVO> listAllocatedVolumesForAccountDiskOfferingIdsAndNotForVms(long accountId, List<Long> diskOfferingIds, List<Long> vmIds);
+ List<VolumeVO> searchRemovedByVms(List<Long> vmIds, Long batchSize);
+
+ VolumeVO findOneByIScsiName(String iScsiName);
}
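A brief sketch exercising the two VolumeDao additions declared above, assuming an injected VolumeDao; the VolumeLookupHelper class and its method names are hypothetical:

    import java.util.List;

    import javax.inject.Inject;

    import com.cloud.storage.VolumeVO;
    import com.cloud.storage.dao.VolumeDao;

    // Hypothetical caller for the new lookups.
    public class VolumeLookupHelper {

        @Inject
        private VolumeDao volumeDao;

        // Volumes already marked removed for the given VMs, limited to batchSize rows.
        public List<VolumeVO> removedVolumesOf(List<Long> vmIds, Long batchSize) {
            return volumeDao.searchRemovedByVms(vmIds, batchSize);
        }

        // Resolves a volume by its iSCSI name via the "iScsiName" criterion added to AllFieldsSearch.
        public VolumeVO byIScsiName(String iScsiName) {
            return volumeDao.findOneByIScsiName(iScsiName);
        }
    }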
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java
index 31d64daf147c..0c4d707635aa 100644
--- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java
@@ -27,14 +27,12 @@
import javax.inject.Inject;
-import com.cloud.configuration.Resource;
-import com.cloud.utils.db.Transaction;
-import com.cloud.utils.db.TransactionCallback;
import org.apache.cloudstack.reservation.ReservationVO;
import org.apache.cloudstack.reservation.dao.ReservationDao;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
+import com.cloud.configuration.Resource;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.server.ResourceTag.ResourceObjectType;
@@ -48,12 +46,15 @@
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.utils.Pair;
import com.cloud.utils.db.DB;
+import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.UpdateBuilder;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -76,12 +77,12 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
protected GenericSearchBuilder<VolumeVO, SumCount> primaryStorageSearch2;
protected GenericSearchBuilder<VolumeVO, SumCount> secondaryStorageSearch;
private final SearchBuilder<VolumeVO> poolAndPathSearch;
+
@Inject
ReservationDao reservationDao;
@Inject
- ResourceTagDao _tagsDao;
+ ResourceTagDao tagsDao;
- protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? and v.mirror_state = ?";
// need to account for zone-wide primary storage where storage_pool has
// null-value pod and cluster, where hypervisor information is stored in
// storage_pool
@@ -395,6 +396,7 @@ public VolumeDaoImpl() {
AllFieldsSearch.and("updatedCount", AllFieldsSearch.entity().getUpdatedCount(), Op.EQ);
AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), Op.EQ);
AllFieldsSearch.and("passphraseId", AllFieldsSearch.entity().getPassphraseId(), Op.EQ);
+ AllFieldsSearch.and("iScsiName", AllFieldsSearch.entity().get_iScsiName(), Op.EQ);
AllFieldsSearch.done();
RootDiskStateSearch = createSearchBuilder();
@@ -502,7 +504,6 @@ public VolumeDaoImpl() {
poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ);
poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ);
poolAndPathSearch.done();
-
}
@Override
@@ -740,7 +741,7 @@ public boolean remove(Long id) {
logger.debug(String.format("Removing volume %s from DB", id));
VolumeVO entry = findById(id);
if (entry != null) {
- _tagsDao.removeByIdAndType(id, ResourceObjectType.Volume);
+ tagsDao.removeByIdAndType(id, ResourceObjectType.Volume);
}
boolean result = super.remove(id);
@@ -763,7 +764,7 @@ public boolean updateUuid(long srcVolId, long destVolId) {
destVol.setInstanceId(instanceId);
update(srcVolId, srcVol);
update(destVolId, destVol);
- _tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume);
+ tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume);
} catch (Exception e) {
throw new CloudRuntimeException("Unable to persist the sequence number for this host");
}
@@ -896,4 +897,24 @@ public VolumeVO persist(VolumeVO entity) {
return volume;
});
}
+
+ @Override
+ public List<VolumeVO> searchRemovedByVms(List<Long> vmIds, Long batchSize) {
+ if (CollectionUtils.isEmpty(vmIds)) {
+ return new ArrayList<>();
+ }
+ SearchBuilder