From 7d7e5dfca83770a4eba91a122a77cd77255da89a Mon Sep 17 00:00:00 2001 From: Daan Hoogland Date: Mon, 22 Dec 2025 19:56:33 +0100 Subject: [PATCH] Deal with Storage Manager tech debt --- .../com/cloud/storage/StorageService.java | 16 +- .../admin/host/AddSecondaryStorageCmd.java | 24 +- .../admin/storage/AddImageStoreCmd.java | 32 +- .../admin/storage/AddImageStoreS3CMD.java | 44 +- .../admin/storage/CreateStoragePoolCmd.java | 74 ++- .../admin/storage/UpdateStoragePoolCmd.java | 2 +- .../api/command/admin/swift/AddSwiftCmd.java | 26 +- .../configuration/ConfigurationManager.java | 47 ++ .../com/cloud/storage/StorageManager.java | 60 +- .../manager/StorageCacheManagerImpl.java | 3 +- .../driver/S3ImageStoreDriverImpl.java | 12 +- .../driver/SwiftImageStoreDriverImpl.java | 19 +- .../java/com/cloud/configuration/Config.java | 65 -- .../ConfigurationManagerImpl.java | 3 +- .../com/cloud/storage/StorageManagerImpl.java | 592 +++++------------- 15 files changed, 375 insertions(+), 644 deletions(-) diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index a29c8f6aecef..65f0904ab886 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -17,7 +17,6 @@ package com.cloud.storage; -import java.net.UnknownHostException; import java.util.Map; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; @@ -35,10 +34,8 @@ import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import com.cloud.exception.DiscoveryException; -import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd; @@ -55,12 +52,9 @@ public interface StorageService { * storage pool. * @return * The StoragePool created. 
- * @throws ResourceInUseException * @throws IllegalArgumentException - * @throws UnknownHostException - * @throws ResourceUnavailableException */ - StoragePool createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException; + StoragePool createPool(CreateStoragePoolCmd cmd) throws IllegalArgumentException; ImageStore createSecondaryStagingStore(CreateSecondaryStagingStoreCmd cmd); @@ -79,10 +73,8 @@ public interface StorageService { * @param primaryStorageId * - the primaryStorageId * @return the primary storage pool - * @throws ResourceUnavailableException - * @throws InsufficientCapacityException */ - StoragePool preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException; + StoragePool preparePrimaryStorageForMaintenance(Long primaryStorageId); /** * Complete maintenance for primary storage @@ -108,7 +100,7 @@ public interface StorageService { boolean deleteSecondaryStagingStore(DeleteSecondaryStagingStoreCmd cmd); - ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map<String, String> details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; + ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map<String, String> details) throws IllegalArgumentException, InvalidParameterValueException; /** * Migrate existing NFS to use object store. @@ -134,7 +126,7 @@ public interface StorageService { void removeSecondaryStorageHeuristic(RemoveSecondaryStorageSelectorCmd cmd); - ObjectStore discoverObjectStore(String name, String url, Long size, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; + ObjectStore discoverObjectStore(String name, String url, Long size, String providerName, Map<String, String> details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java index 9a7eff7e2e59..1553d184a949 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import com.cloud.exception.DiscoveryException; import com.cloud.storage.ImageStore; import com.cloud.user.Account; @@ -67,20 +66,15 @@ public long getEntityOwnerId() { @Override public void execute(){ - try{ - ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), null); - ImageStoreResponse storeResponse = null; - if (result != null ) { - storeResponse = _responseGenerator.createImageStoreResponse(result); - storeResponse.setResponseName(getCommandName()); - storeResponse.setObjectName("secondarystorage"); - setResponseObject(storeResponse); - } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); - } - } catch (DiscoveryException ex) { - logger.warn("Exception: ", ex); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", 
getZoneId(), null); + ImageStoreResponse storeResponse; + if (result != null) { + storeResponse = _responseGenerator.createImageStoreResponse(result); + storeResponse.setResponseName(getCommandName()); + storeResponse.setObjectName("secondarystorage"); + setResponseObject(storeResponse); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java index 72e2e96fe57b..4f50b77d5f82 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java @@ -18,7 +18,6 @@ import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; @@ -31,7 +30,6 @@ import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import com.cloud.exception.DiscoveryException; import com.cloud.storage.ImageStore; import com.cloud.user.Account; @@ -79,11 +77,10 @@ public Long getZoneId() { public Map<String, String> getDetails() { Map<String, String> detailsMap = null; if (details != null && !details.isEmpty()) { - detailsMap = new HashMap(); + detailsMap = new HashMap<>(); Collection props = details.values(); - Iterator iter = props.iterator(); - while (iter.hasNext()) { - HashMap<String, String> detail = (HashMap<String, String>)iter.next(); + for (Object prop : props) { + HashMap<String, String> detail = (HashMap<String, String>) prop; String key = detail.get("key"); String value = detail.get("value"); detailsMap.put(key, value); @@ -123,20 +120,15 @@ public long getEntityOwnerId() { @Override public void execute(){ - try{ - ImageStore result = _storageService.discoverImageStore(getName(), getUrl(), getProviderName(), getZoneId(), getDetails()); - ImageStoreResponse storeResponse = null; - if (result != null) { - storeResponse = _responseGenerator.createImageStoreResponse(result); - storeResponse.setResponseName(getCommandName()); - storeResponse.setObjectName("imagestore"); - setResponseObject(storeResponse); - } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); - } - } catch (DiscoveryException ex) { - logger.warn("Exception: ", ex); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + ImageStore result = _storageService.discoverImageStore(getName(), getUrl(), getProviderName(), getZoneId(), getDetails()); + ImageStoreResponse storeResponse; + if (result != null) { + storeResponse = _responseGenerator.createImageStoreResponse(result); + storeResponse.setResponseName(getCommandName()); + storeResponse.setObjectName("imagestore"); + setResponseObject(storeResponse); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java index 2fe3c7cd106a..6a8b10b8e85e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java @@ -48,15 +48,14 @@ import org.apache.cloudstack.api.response.ImageStoreResponse; import com.cloud.exception.ConcurrentOperationException; -import 
com.cloud.exception.DiscoveryException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.storage.ImageStore; -@APICommand(name = "addImageStoreS3", description = "Adds S3 Image Store", responseObject = ImageStoreResponse.class, since = "4.7.0", - requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) +@APICommand(name = "addImageStoreS3", description = "Adds S3 Image Store", responseObject = ImageStoreResponse.class, + since = "4.7.0", responseHasSensitiveInfo = false) public final class AddImageStoreS3CMD extends BaseCmd implements ClientOptions { private static final String s_name = "addImageStoreS3Response"; @@ -73,32 +72,32 @@ public final class AddImageStoreS3CMD extends BaseCmd implements ClientOptions { @Parameter(name = S3_BUCKET_NAME, type = STRING, required = true, description = "Name of the storage bucket") private String bucketName; - @Parameter(name = S3_SIGNER, type = STRING, required = false, description = "Signer Algorithm to use, either S3SignerType or AWSS3V4SignerType") + @Parameter(name = S3_SIGNER, type = STRING, description = "Signer Algorithm to use, either S3SignerType or AWSS3V4SignerType") private String signer; - @Parameter(name = S3_HTTPS_FLAG, type = BOOLEAN, required = false, description = "Use HTTPS instead of HTTP") + @Parameter(name = S3_HTTPS_FLAG, type = BOOLEAN, description = "Use HTTPS instead of HTTP") private Boolean httpsFlag; - @Parameter(name = S3_CONNECTION_TIMEOUT, type = INTEGER, required = false, description = "Connection timeout (milliseconds)") + @Parameter(name = S3_CONNECTION_TIMEOUT, type = INTEGER, description = "Connection timeout (milliseconds)") private Integer connectionTimeout; - @Parameter(name = S3_MAX_ERROR_RETRY, type = INTEGER, required = false, description = "Maximum number of times to retry on error") + @Parameter(name = S3_MAX_ERROR_RETRY, type = INTEGER, description = "Maximum number of times to retry on error") private Integer maxErrorRetry; - @Parameter(name = S3_SOCKET_TIMEOUT, type = INTEGER, required = false, description = "Socket timeout (milliseconds)") + @Parameter(name = S3_SOCKET_TIMEOUT, type = INTEGER, description = "Socket timeout (milliseconds)") private Integer socketTimeout; - @Parameter(name = S3_CONNECTION_TTL, type = INTEGER, required = false, description = "Connection TTL (milliseconds)") + @Parameter(name = S3_CONNECTION_TTL, type = INTEGER, description = "Connection TTL (milliseconds)") private Integer connectionTtl; - @Parameter(name = S3_USE_TCP_KEEPALIVE, type = BOOLEAN, required = false, description = "Whether TCP keep-alive is used") + @Parameter(name = S3_USE_TCP_KEEPALIVE, type = BOOLEAN, description = "Whether TCP keep-alive is used") private Boolean useTCPKeepAlive; @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { - Map dm = new HashMap(); + Map dm = new HashMap<>(); dm.put(ApiConstants.S3_ACCESS_KEY, getAccessKey()); dm.put(ApiConstants.S3_SECRET_KEY, getSecretKey()); @@ -127,20 +126,15 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE dm.put(ApiConstants.S3_USE_TCP_KEEPALIVE, getUseTCPKeepAlive().toString()); } - try{ - ImageStore result = _storageService.discoverImageStore(null, null, "S3", null, 
dm); - ImageStoreResponse storeResponse; - if (result != null) { - storeResponse = _responseGenerator.createImageStoreResponse(result); - storeResponse.setResponseName(getCommandName()); - storeResponse.setObjectName("imagestore"); - setResponseObject(storeResponse); - } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add S3 Image Store."); - } - } catch (DiscoveryException ex) { - logger.warn("Exception: ", ex); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + ImageStore result = _storageService.discoverImageStore(null, null, "S3", null, dm); + ImageStoreResponse storeResponse; + if (result != null) { + storeResponse = _responseGenerator.createImageStoreResponse(result); + storeResponse.setResponseName(getCommandName()); + storeResponse.setObjectName("imagestore"); + setResponseObject(storeResponse); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add S3 Image Store."); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index 2aef856f58f3..6b5bc633d53e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.storage; -import java.net.UnknownHostException; import java.util.Map; @@ -31,8 +30,6 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import com.cloud.exception.ResourceInUseException; -import com.cloud.exception.ResourceUnavailableException; import com.cloud.storage.StoragePool; import com.cloud.user.Account; @@ -46,53 +43,85 @@ public class CreateStoragePoolCmd extends BaseCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "The cluster ID for the storage pool") + @Parameter(name = ApiConstants.CLUSTER_ID, + type = CommandType.UUID, + entityType = ClusterResponse.class, + description = "The cluster ID for the storage pool") private Long clusterId; - @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "The details for the storage pool") + @Parameter(name = ApiConstants.DETAILS, + type = CommandType.MAP, + description = "The details for the storage pool") private Map details; - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "The name for the storage pool") + @Parameter(name = ApiConstants.NAME, + type = CommandType.STRING, + required = true, + description = "The name for the storage pool") private String storagePoolName; - @Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "The Pod ID for the storage pool") + @Parameter(name = ApiConstants.POD_ID, + type = CommandType.UUID, + entityType = PodResponse.class, + description = "The Pod ID for the storage pool") private Long podId; - @Parameter(name = ApiConstants.TAGS, type = CommandType.STRING, description = "The tags for the storage pool") + @Parameter(name = ApiConstants.TAGS, + type = CommandType.STRING, + description = "The tags for the storage pool") private String tags; - 
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.STRING, + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.STRING, description = "comma separated list of storage access groups for connecting to hosts having those specific groups", since = "4.21.0") private String storageAccessGroups; - @Parameter(name = ApiConstants.URL, type = CommandType.STRING, required = true, description = "The URL of the storage pool") + @Parameter(name = ApiConstants.URL, + type = CommandType.STRING, + required = true, + description = "The URL of the storage pool") private String url; - @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, description = "The Zone ID for the storage pool") + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "The Zone ID for the storage pool") private Long zoneId; - @Parameter(name = ApiConstants.PROVIDER, type = CommandType.STRING, required = false, description = "The storage provider name") + @Parameter(name = ApiConstants.PROVIDER, + type = CommandType.STRING, + description = "The storage provider name") private String storageProviderName; - @Parameter(name = ApiConstants.SCOPE, type = CommandType.STRING, required = false, description = "The scope of the storage: cluster or zone") + @Parameter(name = ApiConstants.SCOPE, + type = CommandType.STRING, + description = "The scope of the storage: cluster or zone") private String scope; - @Parameter(name = ApiConstants.MANAGED, type = CommandType.BOOLEAN, required = false, description = "Whether the storage should be managed by CloudStack") + @Parameter(name = ApiConstants.MANAGED, + type = CommandType.BOOLEAN, + description = "Whether the storage should be managed by CloudStack") private Boolean managed; - @Parameter(name = ApiConstants.CAPACITY_IOPS, type = CommandType.LONG, required = false, description = "IOPS CloudStack can provision from this storage pool") + @Parameter(name = ApiConstants.CAPACITY_IOPS, + type = CommandType.LONG, + description = "IOPS CloudStack can provision from this storage pool") private Long capacityIops; - @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false, description = "Bytes CloudStack can provision from this storage pool") + @Parameter(name = ApiConstants.CAPACITY_BYTES, + type = CommandType.LONG, + description = "Bytes CloudStack can provision from this storage pool") private Long capacityBytes; @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, - required = false, description = "Hypervisor type of the hosts in zone that will be attached to this storage pool. 
KVM, VMware supported as of now.") private String hypervisor; - @Parameter(name = ApiConstants.IS_TAG_A_RULE, type = CommandType.BOOLEAN, description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE) + @Parameter(name = ApiConstants.IS_TAG_A_RULE, + type = CommandType.BOOLEAN, + description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE) private Boolean isTagARule; ///////////////////////////////////////////////////// @@ -175,15 +204,6 @@ public void execute() { } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add storage pool"); } - } catch (ResourceUnavailableException ex1) { - logger.warn("Exception: ", ex1); - throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex1.getMessage()); - } catch (ResourceInUseException ex2) { - logger.warn("Exception: ", ex2); - throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex2.getMessage()); - } catch (UnknownHostException ex3) { - logger.warn("Exception: ", ex3); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex3.getMessage()); } catch (Exception ex4) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex4.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java index 4b0a6ba00b28..f37559cc4043 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java @@ -131,7 +131,7 @@ public ApiCommandResourceType getApiResourceType() { return ApiCommandResourceType.StoragePool; } - public Map getDetails() { + public Map<String, String> getDetails() { return details; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java index cc0c77348a90..c322db8581a2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ImageStoreResponse; -import com.cloud.exception.DiscoveryException; import com.cloud.storage.ImageStore; import com.cloud.user.Account; @@ -83,25 +82,20 @@ public long getEntityOwnerId() { @Override public void execute() { - Map<String, String> dm = new HashMap<String, String>(); + Map<String, String> dm = new HashMap<>(); dm.put(ApiConstants.ACCOUNT, getAccount()); dm.put(ApiConstants.USERNAME, getUsername()); dm.put(ApiConstants.KEY, getKey()); - try{ - ImageStore result = _storageService.discoverImageStore(null, getUrl(), "Swift", null, dm); - ImageStoreResponse storeResponse = null; - if (result != null) { - storeResponse = _responseGenerator.createImageStoreResponse(result); - storeResponse.setResponseName(getCommandName()); - storeResponse.setObjectName("secondarystorage"); - setResponseObject(storeResponse); - } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add Swift secondary storage"); - } - } catch (DiscoveryException ex) { - logger.warn("Exception: ", ex); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + ImageStore result = _storageService.discoverImageStore(null, getUrl(), "Swift", null, dm); + ImageStoreResponse storeResponse; + if (result != null) { + storeResponse = 
_responseGenerator.createImageStoreResponse(result); + storeResponse.setResponseName(getCommandName()); + storeResponse.setObjectName("secondarystorage"); + setResponseObject(storeResponse); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add Swift secondary storage"); } } } diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java index 5909d098db8b..fd87b0943991 100644 --- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java +++ b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java @@ -71,6 +71,53 @@ public interface ConfigurationManager { "Weight for CPU (as a value between 0 and 1) applied to compute capacity for Pods, Clusters and Hosts for COMBINED capacityType for ordering. Weight for RAM will be (1 - weight of CPU)", true, ConfigKey.Scope.Global); + ConfigKey<Integer> ExpungeDelay = new ConfigKey<>( + ConfigKey.CATEGORY_ADVANCED, + Integer.class, + "expunge.delay", + "86400", + "Determines how long (in seconds) to wait before actually expunging destroyed vm. The default value = the default value of expunge.interval", + false, + ConfigKey.Scope.Global, + null); + ConfigKey<Integer> ExpungeInterval = new ConfigKey<>( + ConfigKey.CATEGORY_ADVANCED, + Integer.class, + "expunge.interval", + "86400", + "The interval (in seconds) to wait before running the expunge thread.", + false, + ConfigKey.Scope.Global, + null); + ConfigKey<Integer> ExpungeWorkers = new ConfigKey<>( + ConfigKey.CATEGORY_ADVANCED, + Integer.class, + "expunge.workers", + "10", + "Number of workers performing expunge", + false, + ConfigKey.Scope.Global, + null); + + ConfigKey<Integer> ExtractURLCleanUpInterval = new ConfigKey<>( + ConfigKey.CATEGORY_ADVANCED, + Integer.class, + "extract.url.cleanup.interval", + "7200", + "The interval (in seconds) to wait before cleaning up the extract URLs", + false, + ConfigKey.Scope.Global, + null); + ConfigKey<Integer> ExtractURLExpirationInterval = new ConfigKey<>( + ConfigKey.CATEGORY_ADVANCED, + Integer.class, + "extract.url.expiration.interval", + "14400", + "The life of an extract URL after which it is deleted", + false, + ConfigKey.Scope.Global, + null); + /** * Is this for a VPC * @param offering the offering to check diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 99504d4bc458..32a4bab3d02d 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -232,6 +232,43 @@ public interface StorageManager extends StorageService { "Storage", "true", "Allow SSVMs to try copying public templates from one secondary storage to another instead of downloading them from the source.", true, ConfigKey.Scope.Zone, null); + ConfigKey<Integer> VmDiskThrottlingIopsReadRate = new ConfigKey<>( + Integer.class, + "vm.disk.throttling.iops_read_rate", + "Storage", + "0", + "Default disk I/O read rate in requests per second allowed in User vm's disk.", + true, + ConfigKey.Scope.Global, + null); + ConfigKey<Integer> VmDiskThrottlingIopsWriteRate = new ConfigKey<>( + Integer.class, + "vm.disk.throttling.iops_write_rate", + "Storage", + "0", + "Default disk I/O write rate in requests per second allowed in User vm's disk.", + true, + ConfigKey.Scope.Global, + null); + ConfigKey<Integer> VmDiskThrottlingBytesReadRate = 
new ConfigKey<>( + Integer.class, + "vm.disk.throttling.bytes_read_rate", + "Storage", + "0", + "Default disk I/O read rate in bytes per second allowed in User vm's disk.", + true, + ConfigKey.Scope.Global, + null); + ConfigKey<Integer> VmDiskThrottlingBytesWriteRate = new ConfigKey<>( + Integer.class, + "vm.disk.throttling.bytes_write_rate", + "Storage", + "0", + "Default disk I/O write rate in bytes per second allowed in User vm's disk.", + true, + ConfigKey.Scope.Global, + null); + /** * should we execute in sequence not involving any storages? * @return true if commands should execute in sequence @@ -241,8 +278,8 @@ static boolean shouldExecuteInSequenceOnVmware() { } static boolean shouldExecuteInSequenceOnVmware(Long srcStoreId, Long dstStoreId) { - final Boolean fullClone = getFullCloneConfiguration(srcStoreId) || getFullCloneConfiguration(dstStoreId); - final Boolean allowParallel = getAllowParallelExecutionConfiguration(); + final boolean fullClone = getFullCloneConfiguration(srcStoreId) || getFullCloneConfiguration(dstStoreId); + final boolean allowParallel = getAllowParallelExecutionConfiguration(); return fullClone && !allowParallel; } @@ -284,10 +321,6 @@ static Boolean getFullCloneConfiguration(Long storeId) { boolean canPoolProvideStorageStats(StoragePool pool); - boolean poolProvidesCustomStorageStats(StoragePool pool); - - Map<String, String> getCustomStorageStats(StoragePool pool); - /** * Checks if a host has running VMs that are using its local storage pool. * @return true if local storage is active on the host */ @@ -300,8 +333,6 @@ static Boolean getFullCloneConfiguration(Long storeId) { */ void cleanupStorage(boolean recurring); - String getPrimaryStorageNameLabel(VolumeVO volume); - void createCapacityEntry(StoragePoolVO storagePool, short capacityType, long allocated); Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd) throws StorageUnavailableException; @@ -314,8 +345,6 @@ static Boolean getFullCloneConfiguration(Long storeId) { CapacityVO getStoragePoolUsedStats(Long zoneId, Long podId, Long clusterId, List<Long> poolIds); - List<StoragePoolVO> ListByDataCenterHypervisor(long datacenterId, HypervisorType type); - List<VMInstanceVO> listByStoragePool(long storagePoolId); StoragePoolVO findLocalStorageOnHost(long hostId); @@ -328,11 +357,9 @@ static Boolean getFullCloneConfiguration(Long storeId) { boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool); - boolean canDisconnectHostFromStoragePool(Host host, StoragePool pool); - Host getHost(long hostId); - Host updateSecondaryStorage(long secStorageId, String newUrl); + void updateSecondaryStorage(long secStorageId, String newUrl); void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool); @@ -351,24 +378,19 @@ static Boolean getFullCloneConfiguration(Long storeId) { /** * This comment is relevant to managed storage only. - * * Long clusterId = only used for managed storage - * * Some managed storage can be more efficient handling VM templates (via cloning) if it knows the capabilities of the compute cluster it is dealing with. * If the compute cluster supports UUID resigning and the storage system can clone a volume from a volume, then this determines how much more space a * new root volume (that makes use of a template) will take up on the storage system. 
- * * For example, if a storage system can clone a volume from a volume and the compute cluster supports UUID resigning (relevant for hypervisors like * XenServer and ESXi that put virtual disks in clustered file systems), then the storage system will need to determine if it already has a copy of * the template or if it will need to create one first before cloning the template to a new volume to be used for the new root disk (assuming the root - * disk is being deployed from a template). If the template doesn't already exists on the storage system, then you need to take into consideration space + * disk is being deployed from a template). If the template doesn't already exist on the storage system, then you need to take into consideration space * required for that template (stored in one volume) and space required for a new volume created from that template volume (for your new root volume). - * * If UUID resigning is not available in the compute cluster or the storage system doesn't support cloning a volume from a volume, then for each new * root disk that uses a template, CloudStack will have the template be copied down to a newly created volume on the storage system (i.e. no need * to take into consideration the possible need to first create a volume on the storage system for a template that will be used for the root disk * via cloning). - * * Cloning volumes on the back-end instead of copying down a new template for each new volume helps to alleviate load on the hypervisors. */ boolean storagePoolHasEnoughSpace(List> volume, StoragePool pool, Long clusterId); diff --git a/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java b/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java index 889d0ce14ccc..e4a577646ac2 100644 --- a/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java +++ b/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java @@ -32,6 +32,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.configuration.ConfigurationManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -161,7 +162,7 @@ public void setRunLevel(int level) { public boolean configure(String name, Map params) throws ConfigurationException { cacheReplacementEnabled = Boolean.parseBoolean(configDao.getValue(Config.StorageCacheReplacementEnabled.key())); cacheReplaceMentInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementInterval.key()), 86400); - workers = NumbersUtil.parseInt(configDao.getValue(Config.ExpungeWorkers.key()), 10); + workers = ConfigurationManager.ExpungeWorkers.value(); executors = Executors.newScheduledThreadPool(workers, new NamedThreadFactory("StorageCacheManager-cache-replacement")); return true; } diff --git a/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java b/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java index 9b2f3ddd1001..e5fe2c4b9235 100644 --- a/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java +++ b/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import 
com.cloud.configuration.ConfigurationManager; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -37,7 +38,6 @@ import com.cloud.agent.api.to.S3TO; import com.cloud.configuration.Config; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.utils.NumbersUtil; import com.cloud.utils.storage.S3.S3Utils; public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl { @@ -86,22 +86,18 @@ public String createEntityExtractUrl(DataStore store, String key, ImageFormat fo */ S3TO s3 = (S3TO)getStoreTO(store); - if(logger.isDebugEnabled()) { - logger.debug("Generating pre-signed s3 entity extraction URL for object: " + key); - } + logger.debug("Generating pre-signed s3 entity extraction URL for object: {}", key); Date expiration = new Date(); long milliSeconds = expiration.getTime(); // Get extract url expiration interval set in global configuration (in seconds) - String urlExpirationInterval = _configDao.getValue(Config.ExtractURLExpirationInterval.toString()); - // Expired after configured interval (in milliseconds), default 14400 seconds - milliSeconds += 1000 * NumbersUtil.parseInt(urlExpirationInterval, 14400); + milliSeconds += 1000L * ConfigurationManager.ExtractURLExpirationInterval.value(); expiration.setTime(milliSeconds); URL s3url = S3Utils.generatePresignedUrl(s3, s3.getBucketName(), key, expiration); - logger.info("Pre-Signed URL = " + s3url.toString()); + logger.info("Pre-Signed URL = {}", s3url.toString()); return s3url.toString(); } diff --git a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java index 61ff57fd0587..6670de1e4691 100644 --- a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java +++ b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java @@ -24,6 +24,7 @@ import javax.inject.Inject; +import com.cloud.configuration.ConfigurationManager; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -33,7 +34,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl; @@ -44,7 +44,6 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.SwiftTO; -import com.cloud.configuration.Config; import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.SwiftUtil; import com.cloud.utils.exception.CloudRuntimeException; @@ -57,8 +56,6 @@ public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl { EndPointSelector _epSelector; @Inject StorageCacheManager cacheManager; - @Inject - ConfigurationDao _configDao; @Override public DataStoreTO getStoreTO(DataStore store) { 
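The hunk below repeats the central migration in this patch: a raw string lookup through ConfigurationDao and the Config enum becomes a typed read from a ConfigKey constant, so the default value and the parsing live once, on the key itself. A minimal sketch of the pattern follows; the key name and variables are illustrative only, not part of this patch:

    // Hypothetical key, mirroring the constructor shape used in ConfigurationManager above.
    ConfigKey<Integer> ExampleUrlTtl = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
            "example.url.ttl", "14400", "Illustrative URL TTL in seconds", false, ConfigKey.Scope.Global, null);

    // Before: fetch a string, parse it manually, and repeat the default at every call site.
    int oldStyle = NumbersUtil.parseInt(_configDao.getValue(Config.ExtractURLExpirationInterval.toString()), 14400);
    // After: a typed read; the framework supplies the stored value or the declared default.
    int newStyle = ConfigurationManager.ExtractURLExpirationInterval.value();
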
@@ -83,15 +80,11 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF String containerName = SwiftUtil.getContainerName(dataObject.getType().toString(), dataObject.getId()); String objectName = installPath.split("\\/")[1]; // Get extract url expiration interval set in global configuration (in seconds) - int urlExpirationInterval = Integer.parseInt(_configDao.getValue(Config.ExtractURLExpirationInterval.toString())); + int urlExpirationInterval = ConfigurationManager.ExtractURLExpirationInterval.value(); URL swiftUrl = SwiftUtil.generateTempUrl(swiftTO, containerName, objectName, tempKey, urlExpirationInterval); - if (swiftUrl != null) { - logger.debug("Swift temp-url: " + swiftUrl.toString()); - return swiftUrl.toString(); - } - - throw new CloudRuntimeException("Unable to create extraction URL"); + logger.debug("Swift temp-url: {}", swiftUrl); + return swiftUrl.toString(); } @Override @@ -115,7 +108,7 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal throw new CloudRuntimeException(errMsg); } - CreateContext context = new CreateContext(callback, data); + CreateContext context = new CreateContext<>(callback, data); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setContext(context); @@ -125,7 +118,5 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); } ep.sendMessageAsync(dcmd, caller); - } - } diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index abae4d3996cb..cc79c426522f 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -424,31 +424,6 @@ public enum Config { "The interval (in seconds) between cleanup for removed accounts", null), InstanceName("Advanced", AgentManager.class, String.class, "instance.name", "VM", "Name of the deployment instance.", "instanceName"), - ExpungeDelay( - "Advanced", - UserVmManager.class, - Integer.class, - "expunge.delay", - "86400", - "Determines how long (in seconds) to wait before actually expunging destroyed vm. 
The default value = the default value of expunge.interval", - null), - ExpungeInterval( - "Advanced", - UserVmManager.class, - Integer.class, - "expunge.interval", - "86400", - "The interval (in seconds) to wait before running the expunge thread.", - null), - ExpungeWorkers("Advanced", UserVmManager.class, Integer.class, "expunge.workers", "1", "Number of workers performing expunge ", null), - ExtractURLCleanUpInterval( - "Advanced", - ManagementServer.class, - Integer.class, - "extract.url.cleanup.interval", - "7200", - "The interval (in seconds) to wait before cleaning up the extract URL's ", - null), DisableExtraction( "Advanced", ManagementServer.class, @@ -457,14 +432,6 @@ public enum Config { "false", "Flag for disabling extraction of Templates, ISOs, Snapshots and volumes", null), - ExtractURLExpirationInterval( - "Advanced", - ManagementServer.class, - Integer.class, - "extract.url.expiration.interval", - "14400", - "The life of an extract URL after which it is deleted ", - null), HostStatsInterval( "Advanced", ManagementServer.class, @@ -750,38 +717,6 @@ public enum Config { "3600", "Time (in seconds) to wait before taking over a VM in transition state", null), - VmDiskThrottlingIopsReadRate( - "Advanced", - ManagementServer.class, - Integer.class, - "vm.disk.throttling.iops_read_rate", - "0", - "Default disk I/O read rate in requests per second allowed in User vm's disk.", - null), - VmDiskThrottlingIopsWriteRate( - "Advanced", - ManagementServer.class, - Integer.class, - "vm.disk.throttling.iops_write_rate", - "0", - "Default disk I/O writerate in requests per second allowed in User vm's disk.", - null), - VmDiskThrottlingBytesReadRate( - "Advanced", - ManagementServer.class, - Integer.class, - "vm.disk.throttling.bytes_read_rate", - "0", - "Default disk I/O read rate in bytes per second allowed in User vm's disk.", - null), - VmDiskThrottlingBytesWriteRate( - "Advanced", - ManagementServer.class, - Integer.class, - "vm.disk.throttling.bytes_write_rate", - "0", - "Default disk I/O writerate in bytes per second allowed in User vm's disk.", - null), ControlCidr( "Advanced", ManagementServer.class, diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 21995d5ae650..93ac9396aab7 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -8489,7 +8489,8 @@ public ConfigKey[] getConfigKeys() { BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE, VM_SERVICE_OFFERING_MAX_CPU_CORES, VM_SERVICE_OFFERING_MAX_RAM_SIZE, MIGRATE_VM_ACROSS_CLUSTERS, ENABLE_ACCOUNT_SETTINGS_FOR_DOMAIN, ENABLE_DOMAIN_SETTINGS_FOR_CHILD_DOMAIN, - ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS, DELETE_QUERY_BATCH_SIZE, AllowNonRFC1918CompliantIPs, HostCapacityTypeCpuMemoryWeight + ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS, DELETE_QUERY_BATCH_SIZE, AllowNonRFC1918CompliantIPs, HostCapacityTypeCpuMemoryWeight, + ExpungeDelay, ExpungeInterval, ExpungeWorkers, ExtractURLCleanUpInterval, ExtractURLExpirationInterval }; } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index c1cee1385c6f..dd1817335524 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ 
b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -18,16 +18,12 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; -import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.math.BigInteger; import java.net.URI; import java.net.URISyntaxException; import java.net.URLDecoder; -import java.net.UnknownHostException; import java.nio.file.Files; -import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -35,7 +31,6 @@ import java.util.Date; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -148,8 +143,8 @@ import org.apache.cloudstack.storage.object.ObjectStore; import org.apache.cloudstack.storage.object.ObjectStoreEntity; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang.time.DateUtils; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.EnumUtils; @@ -182,7 +177,6 @@ import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; import com.cloud.cluster.ClusterManagerListener; -import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ConfigurationManagerImpl; import com.cloud.configuration.Resource.ResourceType; @@ -198,11 +192,9 @@ import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConnectionException; import com.cloud.exception.DiscoveryException; -import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageConflictException; import com.cloud.exception.StorageUnavailableException; @@ -219,11 +211,9 @@ import com.cloud.org.Grouping; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceState; -import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementServer; import com.cloud.server.ManagementService; import com.cloud.server.StatsCollector; -import com.cloud.service.dao.ServiceOfferingDetailsDao; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Volume.Type; @@ -273,7 +263,6 @@ import com.cloud.vm.DiskProfile; import com.cloud.vm.UserVmManager; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.VMInstanceDao; import com.google.common.collect.Sets; @@ -356,8 +345,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject SnapshotDataFactory snapshotFactory; @Inject - ConfigurationServer _configServer; - @Inject DataStoreManager _dataStoreMgr; @Inject DataStoreProviderManager _dataStoreProviderMgr; @@ -374,8 +361,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject SnapshotService _snapshotService; @Inject - public StorageService storageService; - @Inject StoragePoolTagsDao 
_storagePoolTagsDao; @Inject StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; @@ -384,8 +369,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject DiskOfferingDetailsDao _diskOfferingDetailsDao; @Inject - ServiceOfferingDetailsDao _serviceOfferingDetailsDao; - @Inject VsphereStoragePolicyDao _vsphereStoragePolicyDao; @Inject private AnnotationDao annotationDao; @@ -414,20 +397,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject ResourceManager _resourceMgr; @Inject - StorageManager storageManager; - @Inject ManagementService managementService; - protected List _discoverers; - - public List getDiscoverers() { - return _discoverers; - } - - public void setDiscoverers(List discoverers) { - _discoverers = discoverers; - } - protected GenericSearchBuilder UpHostsInPoolSearch; protected SearchBuilder StoragePoolSearch; protected SearchBuilder LocalStorageSearch; @@ -445,44 +416,6 @@ public void setDiscoverers(List discoverers) { private static final String NFS_MOUNT_OPTIONS_INCORRECT = "An incorrect mount option was specified"; - public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { - - // if pool is in maintenance and it is the ONLY pool available; reject - List rootVolForGivenVm = volumeDao.findByInstanceAndType(vm.getId(), Type.ROOT); - if (rootVolForGivenVm != null && rootVolForGivenVm.size() > 0) { - boolean isPoolAvailable = isPoolAvailable(rootVolForGivenVm.get(0).getPoolId()); - if (!isPoolAvailable) { - throw new StorageUnavailableException("Can not share " + vm, rootVolForGivenVm.get(0).getPoolId()); - } - } - - // this check is done for maintenance mode for primary storage - // if any one of the volume is unusable, we return false - // if we return false, the allocator will try to switch to another PS if - // available - for (VolumeVO vol : vols) { - if (vol.getRemoved() != null) { - logger.warn("Volume: {} is removed, cannot share on this instance: {}", vol, vm); - // not ok to share - return false; - } - } - // ok to share - return true; - } - - private boolean isPoolAvailable(Long poolId) { - // get list of all pools - List pools = _storagePoolDao.listAll(); - - // if no pools or 1 pool which is in maintenance - if (pools == null || pools.size() == 0 || (pools.size() == 1 && pools.get(0).getStatus().equals(StoragePoolStatus.Maintenance))) { - return false; - } else { - return true; - } - } - protected void enableDefaultDatastoreDownloadRedirectionForExistingInstallations() { if (!configDepot.isNewConfig(DataStoreDownloadFollowRedirects)) { logger.trace("{} is not a new configuration, skipping updating its value", @@ -492,35 +425,12 @@ protected void enableDefaultDatastoreDownloadRedirectionForExistingInstallations List zones = _dcDao.listAll(new Filter(1)); if (CollectionUtils.isNotEmpty(zones)) { - logger.debug(String.format("Updating value for configuration: %s to true", - DataStoreDownloadFollowRedirects.key())); + logger.debug("Updating value for configuration: {} to true", + DataStoreDownloadFollowRedirects.key()); configurationDao.update(DataStoreDownloadFollowRedirects.key(), "true"); } } - @Override - public List ListByDataCenterHypervisor(long datacenterId, HypervisorType type) { - List pools = _storagePoolDao.listByDataCenterId(datacenterId); - List retPools = new ArrayList<>(); - for (StoragePoolVO pool : pools) { - if (pool.getStatus() != StoragePoolStatus.Up) { - continue; - } - if (pool.getScope() == 
ScopeType.ZONE) { - if (pool.getHypervisor() != null && pool.getHypervisor() == type) { - retPools.add(pool); - } - } else { - ClusterVO cluster = _clusterDao.findById(pool.getClusterId()); - if (type == cluster.getHypervisorType()) { - retPools.add(pool); - } - } - } - Collections.shuffle(retPools); - return retPools; - } - @Override public boolean isLocalStorageActiveOnHost(Long hostId) { List storagePoolHostRefs = _storagePoolHostDao.listByHostId(hostId); @@ -542,7 +452,7 @@ public boolean isLocalStorageActiveOnHost(Long hostId) { volumeSC.setJoinParameters("activeVmSB", "state", State.Starting, State.Running, State.Stopping, State.Migrating); List volumes = volumeDao.search(volumeSC, null); - if (volumes.size() > 0) { + if (!volumes.isEmpty()) { return true; } } @@ -611,31 +521,6 @@ public boolean canPoolProvideStorageStats(StoragePool pool) { return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideStorageStats(); } - @Override - public boolean poolProvidesCustomStorageStats(StoragePool pool) { - DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); - DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).poolProvidesCustomStorageStats(); - } - - @Override - public Map getCustomStorageStats(StoragePool pool) { - if (pool == null) { - return null; - } - - if (!pool.isManaged()) { - return null; - } - - DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); - DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - if (storeDriver instanceof PrimaryDataStoreDriver) { - return ((PrimaryDataStoreDriver)storeDriver).getCustomStorageStats(pool); - } - return null; - } - @Override public Answer getVolumeStats(StoragePool pool, Command cmd) { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); @@ -657,56 +542,27 @@ public Answer getVolumeStats(StoragePool pool, Command cmd) { return new GetVolumeStatsAnswer(getVolumeStatsCommand, "", statEntry); } - public Long chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts, boolean sendToVmResidesOn, Long vmId) { - if (sendToVmResidesOn) { - if (vmId != null) { - VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); - if (vmInstance != null) { - Long hostId = vmInstance.getHostId(); - if (hostId != null && !avoidHosts.contains(vmInstance.getHostId())) { - return hostId; - } - } - } - /* - * Can't find the vm where host resides on(vm is destroyed? 
or - * volume is detached from vm), randomly choose a host to send the - * cmd - */ - } - List poolHosts = _storagePoolHostDao.listByHostStatus(poolVO.getId(), Status.Up); - Collections.shuffle(poolHosts); - if (poolHosts != null && poolHosts.size() > 0) { - for (StoragePoolHostVO sphvo : poolHosts) { - if (!avoidHosts.contains(sphvo.getHostId())) { - return sphvo.getHostId(); - } - } - } - return null; - } - @Override public boolean configure(String name, Map params) { Map configs = _configDao.getConfiguration("management-server", params); _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); - logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); + logger.info("pool.acquisition.wait.seconds is configured as {} seconds", _storagePoolAcquisitionWaitSeconds); _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _storagePoolHostDao, _dataStoreProviderMgr), true, false, true); - logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() - + ", template cleanup enabled: " + TemplateCleanupEnabled.value()); + logger.info("Storage cleanup enabled: {}, interval: {}, delay: {}, template cleanup enabled: {}", + StorageCleanupEnabled.value(), + StorageCleanupInterval.value(), + StorageCleanupDelay.value(), + TemplateCleanupEnabled.value()); - String cleanupInterval = configs.get("extract.url.cleanup.interval"); - _downloadUrlCleanupInterval = NumbersUtil.parseInt(cleanupInterval, 7200); + _downloadUrlCleanupInterval = ConfigurationManager.ExtractURLCleanUpInterval.value(); - String urlExpirationInterval = configs.get("extract.url.expiration.interval"); - _downloadUrlExpirationInterval = NumbersUtil.parseInt(urlExpirationInterval, 14400); + _downloadUrlExpirationInterval = ConfigurationManager.ExtractURLExpirationInterval.value(); - String workers = configs.get("expunge.workers"); - int wrks = NumbersUtil.parseInt(workers, 10); - _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("StorageManager-Scavenger")); + int workers = ConfigurationManager.ExpungeWorkers.value(); + _executor = Executors.newScheduledThreadPool(workers, new NamedThreadFactory("StorageManager-Scavenger")); _agentMgr.registerForHostEvents(ComponentContext.inject(LocalStoragePoolListener.class), true, false, false); @@ -829,7 +685,7 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con boolean useLocalStorageForSystemVM = false; Boolean isLocal = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dc.getId()); if (isLocal != null) { - useLocalStorageForSystemVM = isLocal.booleanValue(); + useLocalStorageForSystemVM = isLocal; } if (!(dc.isLocalStorageEnabled() || useLocalStorageForSystemVM)) { return null; @@ -850,7 +706,7 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con // not able to distinguish multiple local datastores that may be // available on the host, to support smooth migration, we // need to perform runtime upgrade here - if (pInfo.getHostPath().length() > 0) { + if (!pInfo.getHostPath().isEmpty()) { pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, "", pInfo.getUuid()); } } @@ -858,7 +714,9 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con //the path can be different, but if they have the same uuid, assume 
they are the same storage pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, pInfo.getUuid()); if (pool != null) { - logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool"); + logger.debug("Found a storage pool: {}, but with different hostpath {}, still treat it as the same pool", + pInfo.getUuid(), + pInfo.getHostPath()); } } @@ -961,7 +819,7 @@ protected void checkNFSMountOptionsForUpdate(Map details, Storag } @Override - public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { + public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws IllegalArgumentException { String providerName = cmd.getStorageProviderName(); Map uriParams = extractUriParamsAsMap(cmd.getUrl()); boolean isFileScheme = "file".equals(uriParams.get("scheme")); @@ -997,7 +855,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource throw new InvalidParameterValueException("zone id can't be null, if scope is zone"); } - HypervisorType hypervisorType = HypervisorType.KVM; + HypervisorType hypervisorType; // defaults to HypervisorType.KVM if (scopeType == ScopeType.ZONE) { // ignore passed clusterId and podId clusterId = null; @@ -1072,7 +930,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource lifeCycle.attachZone(store, zoneScope, hypervisorType); } } catch (Exception e) { - logger.debug("Failed to add data store: " + e.getMessage(), e); + logger.debug("Failed to add data store: {}", e.getMessage(), e); try { // clean up the db, just absorb the exception thrown in deletion with error logged, so that user can get error for adding data store // not deleting data store. 
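A note on the logging changes in the hunks that follow: messages built with string concatenation or String.format become parameterized log4j calls, and the surrounding isDebugEnabled() guards are dropped, because with placeholders the message is only rendered once the logger confirms the level is enabled. In sketch form, with an illustrative variable name:

    // Before: the message is concatenated even when debug logging is off,
    // so an explicit guard was needed to avoid the wasted work.
    if (logger.isDebugEnabled()) {
        logger.debug("Updating Storage Pool name to: " + name);
    }
    // After: the {} placeholder defers formatting until the level check passes.
    logger.debug("Updating Storage Pool name to: {}", name);
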
@@ -1080,7 +938,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource lifeCycle.deleteDataStore(store); } } catch (Exception ex) { - logger.debug("Failed to clean up storage pool: " + ex.getMessage()); + logger.debug("Failed to clean up storage pool: {}", ex.getMessage()); } throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -1094,9 +952,7 @@ protected Map extractUriParamsAsMap(String url) { try { uriInfo = UriUtils.getUriInfo(url); } catch (CloudRuntimeException cre) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("URI validation for url: %s failed, returning empty uri params", url)); - } + logger.debug("URI validation for url: {} failed, returning empty uri params", url); return uriParams; } @@ -1104,9 +960,7 @@ protected Map extractUriParamsAsMap(String url) { String storageHost = uriInfo.getStorageHost(); String storagePath = uriInfo.getStoragePath(); if (scheme == null) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Scheme for url: %s is not found, returning empty uri params", url)); - } + logger.debug("Scheme for url: {} is not found, returning empty uri params", url); return uriParams; } boolean isHostOrPathBlank = StringUtils.isAnyBlank(storagePath, storageHost); @@ -1139,12 +993,7 @@ protected Map extractUriParamsAsMap(String url) { } } - String hostPath = null; - try { - hostPath = URLDecoder.decode(storagePath, "UTF-8"); - } catch (UnsupportedEncodingException e) { - logger.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); - } + String hostPath = URLDecoder.decode(storagePath, StringUtils.getPreferredCharset()); if (hostPath == null) { // if decoding fails, use getPath() anyway hostPath = storagePath; } @@ -1159,17 +1008,15 @@ protected Map extractUriParamsAsMap(String url) { return uriParams; } - private Map extractApiParamAsMap(Map ds) { + private Map extractApiParamAsMap(Map ds) { Map details = new HashMap<>(); if (ds != null) { - Collection detailsCollection = ds.values(); - Iterator it = detailsCollection.iterator(); - while (it.hasNext()) { - HashMap d = (HashMap)it.next(); - Iterator it2 = d.entrySet().iterator(); - while (it2.hasNext()) { - Map.Entry entry = (Map.Entry)it2.next(); - details.put((String)entry.getKey(), (String)entry.getValue()); + Collection detailsCollection = ds.values(); + for (Object o : detailsCollection) { + HashMap d = (HashMap) o; + for (Object object : d.entrySet()) { + Map.Entry entry = (Map.Entry) object; + details.put((String) entry.getKey(), (String) entry.getValue()); } } } @@ -1233,17 +1080,14 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I String name = cmd.getName(); if(StringUtils.isNotBlank(name)) { - logger.debug("Updating Storage Pool name to: " + name); + logger.debug("Updating Storage Pool name to: {}", name); pool.setName(name); _storagePoolDao.update(pool.getId(), pool); } - final List storagePoolTags = cmd.getTags(); if (storagePoolTags != null) { - if (logger.isDebugEnabled()) { - logger.debug("Updating Storage Pool Tags to :" + storagePoolTags); - } + logger.debug("Updating Storage Pool Tags to : {}", storagePoolTags); if (pool.getPoolType() == StoragePoolType.DatastoreCluster) { List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); for (StoragePoolVO childPool : childStoragePools) { @@ -1274,9 +1118,8 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I } // retrieve current details and 
merge/overlay input to capture changes - Map details = null; - details = _storagePoolDetailsDao.listDetailsKeyPairs(id); - if (inputDetails != null) { + Map details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + if (!inputDetails.isEmpty()) { details.putAll(inputDetails); changes = true; } @@ -1288,11 +1131,11 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I if (dataStoreLifeCycle instanceof PrimaryDataStoreLifeCycle) { if (updatedCapacityBytes != null) { - details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, updatedCapacityBytes != null ? String.valueOf(updatedCapacityBytes) : null); + details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, String.valueOf(updatedCapacityBytes)); _storagePoolDao.updateCapacityBytes(id, updatedCapacityBytes); } if (updatedCapacityIops != null) { - details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, updatedCapacityIops != null ? String.valueOf(updatedCapacityIops) : null); + details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, String.valueOf(updatedCapacityIops)); _storagePoolDao.updateCapacityIops(id, updatedCapacityIops); } if (cmd.getUrl() != null) { @@ -1339,9 +1182,7 @@ private void changeStoragePoolScopeToCluster(StoragePoolVO primaryStorage, Long throw new PermissionDeniedException("Cannot perform this operation, Cluster is currently disabled: " + clusterId); } - List states = Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring); - - Long id = primaryStorage.getId(); + long id = primaryStorage.getId(); Pair, Integer> vmsNotInClusterUsingPool = _vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, id); if (vmsNotInClusterUsingPool.second() != 0) { throw new CloudRuntimeException(String.format("Cannot change scope of the storage pool [%s] to cluster [%s] " + @@ -1528,9 +1369,7 @@ public boolean configureStorageAccess(ConfigureStorageAccessCmd cmd) { throw new CloudRuntimeException("Storage Access Groups are not suitable for local storage"); } - if (logger.isDebugEnabled()) { - logger.debug("Updating Storage Pool Access Group Maps to :" + storageAccessGroups); - } + logger.debug("Updating Storage Pool Access Group Maps to : {}", storageAccessGroups); if (storagePool.getPoolType() == StoragePoolType.DatastoreCluster) { List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(storagePool.getId()); @@ -1553,9 +1392,8 @@ protected void checkIfStorageAccessGroupsExistsOnZone(long zoneId, List String storageAccessGroupsOnZone = zoneVO.getStorageAccessGroups(); List zoneTagsList = parseTags(storageAccessGroupsOnZone); - List newTags = storageAccessGroups; - List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + List existingTagsOnZone = new ArrayList<>(CollectionUtils.intersection(storageAccessGroups, zoneTagsList)); if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { throw new CloudRuntimeException(String.format("access groups already exist on the zone: %s", existingTagsOnZone)); @@ -1571,10 +1409,9 @@ protected void checkIfStorageAccessGroupsExistsOnPod(long podId, List st List podTagsList = parseTags(storageAccessGroupsOnPod); List zoneTagsList = parseTags(storageAccessGroupsOnZone); - List newTags = storageAccessGroups; - List existingTagsOnPod = (List) CollectionUtils.intersection(newTags, podTagsList); - List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + List existingTagsOnPod = new ArrayList<>(CollectionUtils.intersection(storageAccessGroups, podTagsList)); + List 
existingTagsOnZone = new ArrayList<>(CollectionUtils.intersection(storageAccessGroups, zoneTagsList)); if (CollectionUtils.isNotEmpty(existingTagsOnPod) || CollectionUtils.isNotEmpty(existingTagsOnZone)) { String message = "access groups already exist "; @@ -1605,11 +1442,10 @@ protected void checkIfStorageAccessGroupsExistsOnCluster(long clusterId, List podTagsList = parseTags(storageAccessGroupsOnPod); List zoneTagsList = parseTags(storageAccessGroupsOnZone); List clusterTagsList = parseTags(storageAccessGroupsOnCluster); - List newTags = storageAccessGroups; - List existingTagsOnCluster = (List) CollectionUtils.intersection(newTags, clusterTagsList); - List existingTagsOnPod = (List) CollectionUtils.intersection(newTags, podTagsList); - List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + List existingTagsOnCluster = new ArrayList<>(CollectionUtils.intersection(storageAccessGroups, clusterTagsList)); + List existingTagsOnPod = new ArrayList<>(CollectionUtils.intersection(storageAccessGroups, podTagsList)); + List existingTagsOnZone = new ArrayList<>(CollectionUtils.intersection(storageAccessGroups, zoneTagsList)); if (CollectionUtils.isNotEmpty(existingTagsOnCluster) || CollectionUtils.isNotEmpty(existingTagsOnPod) || CollectionUtils.isNotEmpty(existingTagsOnZone)) { String message = "access groups already exist "; @@ -1674,7 +1510,7 @@ public boolean deletePool(DeletePoolCmd cmd) { StoragePoolVO sPool = _storagePoolDao.findById(id); if (sPool == null) { - logger.warn("Unable to find pool:" + id); + logger.warn("Unable to find pool: {}", id); throw new InvalidParameterValueException("Unable to find pool by id " + id); } if (sPool.getStatus() != StoragePoolStatus.Maintenance) { @@ -1683,7 +1519,7 @@ public boolean deletePool(DeletePoolCmd cmd) { } if (sPool.getPoolType() == StoragePoolType.DatastoreCluster) { - // FR41 yet to handle on failure of deletion of any of the child storage pool + // FR41: failure to delete any of the child storage pools is not yet handled if (checkIfDataStoreClusterCanbeDeleted(sPool, forced)) { Transaction.execute(new TransactionCallbackNoReturn() { @Override @@ -1705,17 +1541,15 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @Override public Pair, Boolean> getStoragePoolNFSMountOpts(StoragePool pool, Map details) { boolean details_added = false; - if (!pool.getPoolType().equals(Storage.StoragePoolType.NetworkFilesystem)) { - return new Pair<>(details, details_added); - } - - StoragePoolDetailVO nfsMountOpts = _storagePoolDetailsDao.findDetail(pool.getId(), ApiConstants.NFS_MOUNT_OPTIONS); - if (nfsMountOpts != null) { - if (details == null) { - details = new HashMap<>(); + if (pool.getPoolType().equals(Storage.StoragePoolType.NetworkFilesystem)) { + StoragePoolDetailVO nfsMountOpts = _storagePoolDetailsDao.findDetail(pool.getId(), ApiConstants.NFS_MOUNT_OPTIONS); + if (nfsMountOpts != null) { + if (details == null) { + details = new HashMap<>(); + } + details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts.getValue()); + details_added = true; } - details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts.getValue()); - details_added = true; } return new Pair<>(details, details_added); } @@ -1931,7 +1765,7 @@ public void enableHost(long hostId) { } } catch (Exception ex) { - logger.error("hostEnabled(long) failed for storage provider " + provider.getName(), ex); + logger.error("hostEnabled(long) failed for storage provider {}", provider.getName(), ex); } } } @@ -1940,7 +1774,7 @@ public void enableHost(long
hostId) { @Override public BigDecimal getStorageOverProvisioningFactor(Long poolId) { - return new BigDecimal(CapacityManager.StorageOverprovisioningFactor.valueIn(poolId)); + return BigDecimal.valueOf(CapacityManager.StorageOverprovisioningFactor.valueIn(poolId)); } @Override @@ -1992,7 +1826,7 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l } } - if (capacities.size() == 0) { + if (capacities.isEmpty()) { CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity, capacityType); capacity.setCapacityState(capacityState); @@ -2034,7 +1868,7 @@ public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirs if (hostIdsToAvoid != null) { hostIds.removeAll(hostIdsToAvoid); } - if (hostIds == null || hostIds.isEmpty()) { + if (hostIds.isEmpty()) { throw new StorageUnavailableException(String.format("Unable to send command to the pool %s due to there is no enabled hosts up in this cluster", pool), pool.getId()); } for (Long hostId : hostIds) { @@ -2045,7 +1879,7 @@ public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirs long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostId, cmd); answers.add(_agentMgr.send(targetHostId, cmd)); } - return new Pair<>(hostId, answers.toArray(new Answer[answers.size()])); + return new Pair<>(hostId, answers.toArray(new Answer[0])); } catch (AgentUnavailableException | OperationTimedoutException e) { logger.debug("Unable to send storage pool command to {} via {}", pool::toString, () -> _hostDao.findById(hostId), () -> e); } @@ -2104,8 +1938,8 @@ public void cleanupStorage(boolean recurring) { _tmpltMgr.evictTemplateFromStoragePool(templatePoolVO); } } catch (Exception e) { - logger.error(String.format("Failed to clean up primary storage pool [%s] due to: [%s].", pool, e.getMessage())); - logger.debug(String.format("Failed to clean up primary storage pool [%s].", pool), e); + logger.error("Failed to clean up primary storage pool [{}] due to: [{}].", pool, e.getMessage()); + logger.debug("Failed to clean up primary storage pool [{}].", pool, e); } } } @@ -2119,7 +1953,8 @@ public void cleanupStorage(boolean recurring) { if (logger.isDebugEnabled()) { snapshot = _snapshotDao.findById(snapshotDataStoreVO.getSnapshotId()); if (snapshot == null) { - logger.warn(String.format("Did not find snapshot [ID: %d] for which store reference is in destroying state; therefore, it cannot be destroyed.", snapshotDataStoreVO.getSnapshotId())); + logger.warn("Did not find snapshot [ID: {}] for which store reference is in destroying state; therefore, it cannot be destroyed.", + snapshotDataStoreVO.getSnapshotId()); continue; } snapshotUuid = snapshot.getUuid(); @@ -2127,22 +1962,21 @@ public void cleanupStorage(boolean recurring) { try { if (logger.isDebugEnabled()) { - logger.debug(String.format("Verifying if snapshot [%s] is in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); + logger.debug("Verifying if snapshot [{}] is in destroying state in {} data store ID: {}.", + snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId()); } SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotDataStoreVO.getSnapshotId(), snapshotDataStoreVO.getDataStoreId(), snapshotDataStoreVO.getRole()); if (snapshotInfo != null) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Snapshot [%s] in destroying state found in %s data store [%s]; 
therefore, it will be destroyed.", snapshotUuid, storeRole, snapshotInfo.getDataStore().getUuid())); - } + logger.debug("Snapshot [{}] in destroying state found in {} data store [{}]; therefore, it will be destroyed.", + snapshotUuid, storeRole, snapshotInfo.getDataStore().getUuid()); _snapshotService.deleteSnapshot(snapshotInfo); - } else if (logger.isDebugEnabled()) { - logger.debug(String.format("Did not find snapshot [%s] in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); + } else { + logger.debug("Did not find snapshot [{}] in destroying state in {} data store ID: {}.", + snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId()); } } catch (Exception e) { logger.error("Failed to delete snapshot [{}] from storage due to: [{}].", snapshot, e.getMessage()); - if (logger.isDebugEnabled()) { - logger.debug("Failed to delete snapshot [{}] from storage.", snapshot, e); - } + logger.debug("Failed to delete snapshot [{}] from storage.", snapshot, e); } } cleanupSecondaryStorage(recurring); @@ -2158,7 +1992,7 @@ public void cleanupStorage(boolean recurring) { } } if (isVolumeSuspectedDestroyDuplicateOfVmVolume(vol)) { - logger.warn(String.format("Skipping cleaning up %s as it could be a duplicate for another volume on same pool", vol)); + logger.warn("Skipping cleaning up {} as it could be a duplicate for another volume on same pool", vol); continue; } try { @@ -2204,7 +2038,7 @@ public void cleanupStorage(boolean recurring) { for (VolumeDataStoreVO volumeDataStore : volumeDataStores) { VolumeVO volume = volumeDao.findById(volumeDataStore.getVolumeId()); if (volume == null) { - logger.warn(String.format("Uploaded volume [%s] not found, so cannot be destroyed.", volumeDataStore.getVolumeId())); + logger.warn("Uploaded volume [{}] not found, so cannot be destroyed.", volumeDataStore.getVolumeId()); continue; } try { @@ -2216,7 +2050,7 @@ public void cleanupStorage(boolean recurring) { } Host host = _hostDao.findById(ep.getId()); if (host != null && host.getManagementServerId() != null) { - if (_serverId == host.getManagementServerId().longValue()) { + if (_serverId == host.getManagementServerId()) { volService.destroyVolume(volume.getId()); // decrement volume resource count _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), volume.isDisplayVolume(), @@ -2244,7 +2078,7 @@ public void cleanupStorage(boolean recurring) { for (TemplateDataStoreVO templateDataStore : templateDataStores) { VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); if (template == null) { - logger.warn(String.format("Uploaded template [%s] not found, so cannot be destroyed.", templateDataStore.getTemplateId())); + logger.warn("Uploaded template [{}] not found, so cannot be destroyed.", templateDataStore.getTemplateId()); continue; } try { @@ -2256,7 +2090,7 @@ public void cleanupStorage(boolean recurring) { } Host host = _hostDao.findById(ep.getId()); if (host != null && host.getManagementServerId() != null) { - if (_serverId == host.getManagementServerId().longValue()) { + if (_serverId == host.getManagementServerId()) { AsyncCallFuture future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore)); TemplateApiResult result = future.get(); if (!result.isSuccess()) { @@ -2274,7 +2108,7 @@ public void cleanupStorage(boolean recurring) { _templateStoreDao.removeByTemplateStore(template.getId(), dataStore.getId()); // find all eligible image stores for this template List imageStores =
_tmpltMgr.getImageStoreByTemplate(template.getId(), null); - if (imageStores == null || imageStores.size() == 0) { + if (imageStores == null || imageStores.isEmpty()) { template.setState(VirtualMachineTemplate.State.Inactive); _templateDao.update(template.getId(), template); @@ -2316,7 +2150,7 @@ protected boolean isVolumeSuspectedDestroyDuplicateOfVmVolume(VolumeVO gcVolume) List vmUsableVolumes = volumeDao.findUsableVolumesForInstance(vmId); for (VolumeVO vol : vmUsableVolumes) { if (gcVolume.getPoolId().equals(vol.getPoolId()) && gcVolume.getPath().equals(vol.getPath())) { - logger.debug(String.format("%s meant for garbage collection could a possible duplicate for %s", gcVolume, vol)); + logger.debug("{} meant for garbage collection could be a possible duplicate for {}", gcVolume, vol); return true; } } @@ -2325,10 +2159,8 @@ protected boolean isVolumeSuspectedDestroyDuplicateOfVmVolume(VolumeVO gcVolume) /** * This method only applies for managed storage. - * * For XenServer and vSphere, see if we need to remove an SR or a datastore, then remove the underlying volume * from any applicable access control list (before other code attempts to delete the volume that supports it). - * * For KVM, just tell the underlying storage plug-in to remove the volume from any applicable access control list * (before other code attempts to delete the volume that supports it). */ @@ -2379,52 +2211,10 @@ private void handleManagedStorage(Volume volume) { } } - @DB - List findAllVolumeIdInSnapshotTable(Long storeId) { - String sql = "SELECT volume_id from snapshots, snapshot_store_ref WHERE snapshots.id = snapshot_store_ref.snapshot_id and store_id=? GROUP BY volume_id"; - List list = new ArrayList<>(); - try { - TransactionLegacy txn = TransactionLegacy.currentTxn(); - ResultSet rs = null; - PreparedStatement pstmt = null; - pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setLong(1, storeId); - rs = pstmt.executeQuery(); - while (rs.next()) { - list.add(rs.getLong(1)); - } - return list; - } catch (Exception e) { - logger.debug("failed to get all volumes who has snapshots in secondary storage " + storeId + " due to " + e.getMessage()); - return null; - } - - } - - List findAllSnapshotForVolume(Long volumeId) { - String sql = "SELECT backup_snap_id FROM snapshots WHERE volume_id=? and backup_snap_id is not NULL"; - try { - TransactionLegacy txn = TransactionLegacy.currentTxn(); - ResultSet rs = null; - PreparedStatement pstmt = null; - pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setLong(1, volumeId); - rs = pstmt.executeQuery(); - List list = new ArrayList<>(); - while (rs.next()) { - list.add(rs.getString(1)); - } - return list; - } catch (Exception e) { - logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); - return null; - } - } - @Override @DB public void cleanupSecondaryStorage(boolean recurring) { - // NOTE that object_store refactor will immediately delete the object from secondary storage when deleteTemplate etc. api is issued. // so here we don't need to issue DeleteCommand to resource anymore, only need to remove db entry.
try { // Cleanup templates in template_store_ref @@ -2436,7 +2226,7 @@ public void cleanupSecondaryStorage(boolean recurring) { logger.debug("Secondary storage garbage collector found {} Templates to cleanup on template_store_ref for store: {}", destroyedTemplateStoreVOs.size(), store); for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) { if (logger.isDebugEnabled()) { - logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); + logger.debug("Deleting template store DB entry: {}", destroyedTemplateStoreVO); } _templateStoreDao.remove(destroyedTemplateStoreVO.getId()); } @@ -2454,13 +2244,11 @@ public void cleanupSecondaryStorage(boolean recurring) { // check if this snapshot has child SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store); if (snap.getChild() != null) { - logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + " , because it has child"); + logger.debug("Skip snapshot on store: {}, because it has a child", destroyedSnapshotStoreVO); continue; } - if (logger.isDebugEnabled()) { - logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO); - } + logger.debug("Deleting snapshot store DB entry: {}", destroyedSnapshotStoreVO); List imageStoreRefs = _snapshotStoreDao.listBySnapshotAndDataStoreRole(destroyedSnapshotStoreVO.getSnapshotId(), DataStoreRole.Image); if (imageStoreRefs.size() <= 1) { @@ -2468,9 +2256,7 @@ public void cleanupSecondaryStorage(boolean recurring) { } SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findDestroyedReferenceBySnapshot(destroyedSnapshotStoreVO.getSnapshotId(), DataStoreRole.Primary); if (snapshotOnPrimary != null) { - if (logger.isDebugEnabled()) { - logger.debug("Deleting snapshot on primary store reference DB entry: " + snapshotOnPrimary); - } + logger.debug("Deleting snapshot on primary store reference DB entry: {}", snapshotOnPrimary); _snapshotStoreDao.remove(snapshotOnPrimary.getId()); } _snapshotStoreDao.remove(destroyedSnapshotStoreVO.getId()); @@ -2489,9 +2275,7 @@ public void cleanupSecondaryStorage(boolean recurring) { destroyedStoreVOs.addAll(_volumeDataStoreDao.listByVolumeState(Volume.State.Expunged)); logger.debug("Secondary storage garbage collector found {} volumes to cleanup on volume_store_ref for store: {}", destroyedStoreVOs.size(), store); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { - if (logger.isDebugEnabled()) { - logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); - } + logger.debug("Deleting volume store DB entry: {}", destroyedStoreVO); _volumeStoreDao.remove(destroyedStoreVO.getId()); } @@ -2504,24 +2288,12 @@ public void cleanupSecondaryStorage(boolean recurring) { } } - @Override - public String getPrimaryStorageNameLabel(VolumeVO volume) { - Long poolId = volume.getPoolId(); - - // poolId is null only if volume is destroyed, which has been checked - // before.
- assert poolId != null; - StoragePoolVO primaryDataStoreVO = _storagePoolDao.findById(poolId); - assert primaryDataStoreVO != null; - return primaryDataStoreVO.getUuid(); - } - @Override @DB @ActionEvent(eventType = EventTypes.EVENT_MAINTENANCE_PREPARE_PRIMARY_STORAGE, eventDescription = "preparing storage pool for maintenance", async = true) - public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException { - StoragePoolVO primaryStorage = null; + public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStorageId) { + StoragePoolVO primaryStorage; primaryStorage = _storagePoolDao.findById(primaryStorageId); if (primaryStorage == null) { @@ -2566,14 +2338,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } }); - for (Iterator iteratorChildDatastore = childDatastores.listIterator(); iteratorChildDatastore.hasNext(); ) { - DataStore childStore = _dataStoreMgr.getDataStore(iteratorChildDatastore.next().getId(), DataStoreRole.Primary); + for (StoragePoolVO datastore : childDatastores) { + DataStore childStore = _dataStoreMgr.getDataStore(datastore.getId(), DataStoreRole.Primary); try { lifeCycle.maintain(childStore); } catch (Exception e) { - if (logger.isDebugEnabled()) { - logger.debug("Exception on maintenance preparation of one of the child datastores in datastore cluster {} with error {}", datastoreCluster, e); - } + logger.debug("Exception on maintenance preparation of one of the child datastores in datastore cluster {}", datastoreCluster, e); // Set to ErrorInMaintenance state of all child storage pools and datastore cluster for (StoragePoolVO childDatastore : childDatastores) { childDatastore.setStatus(StoragePoolStatus.ErrorInMaintenance); @@ -2592,7 +2362,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { eventDescription = "canceling maintenance for primary storage pool", async = true) public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStorageMaintenanceCmd cmd) throws ResourceUnavailableException { Long primaryStorageId = cmd.getId(); - StoragePoolVO primaryStorage = null; + StoragePoolVO primaryStorage; primaryStorage = _storagePoolDao.findById(primaryStorageId); @@ -2649,7 +2419,7 @@ public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) { List poolIds = new ArrayList<>(); poolIds.add(poolId); List hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds); - if (hosts.size() > 0) { + if (!hosts.isEmpty()) { Long hostId = hosts.get(0); ModifyStoragePoolCommand modifyStoragePoolCommand = new ModifyStoragePoolCommand(true, pool); final Answer answer = _agentMgr.easySend(hostId, modifyStoragePoolCommand); @@ -2766,9 +2536,9 @@ public void syncDatastoreClusterStoragePool(long datastoreClusterPoolId, List set = new LinkedHashSet<>(storageTags); storageTags.clear(); storageTags.addAll(set); - if (logger.isDebugEnabled()) { - logger.debug("Updating Storage Pool Tags to :" + storageTags); - } + logger.debug("Updating Storage Pool Tags to : {}", storageTags); _storagePoolTagsDao.persist(storageTags); } } else { @@ -2887,11 +2655,11 @@ public boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, } if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { - logger.debug(String.format("Storage access groups on the host %s are %s", host, hostStorageAccessGroups)); + logger.debug("Storage access groups on the host {} are {}", host, hostStorageAccessGroups); } if 
(CollectionUtils.isNotEmpty(storagePoolAccessGroups)) { - logger.debug(String.format("Storage access groups on the storage pool %s are %s", host, storagePoolAccessGroups)); + logger.debug("Storage access groups on the storage pool {} are {}", pool, storagePoolAccessGroups); } List hostTagList = Arrays.asList(hostStorageAccessGroups); @@ -2914,8 +2682,8 @@ public Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAcces List destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId()); if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) { - logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s", - srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups)); + logger.debug("Storage access groups on source storage {} are {} and destination storage {} are {}", + srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups); List intersection = new ArrayList<>(srcStorageAccessGroups); intersection.retainAll(destStorageAccessGroups); if (CollectionUtils.isNotEmpty(intersection)) { @@ -2941,16 +2709,14 @@ public Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAcces List srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId); List destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId()); - logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s", - srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups)); + logger.debug("Storage access groups on source storage {} are {} and destination storage {} are {}", + srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups); if (CollectionUtils.isEmpty(srcStorageAccessGroups) && CollectionUtils.isEmpty(destStorageAccessGroups)) { return new Pair<>(true, "Success"); } if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) { - List intersection = new ArrayList<>(srcStorageAccessGroups); - intersection.retainAll(destStorageAccessGroups); if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { boolean hasSrcCommon = srcStorageAccessGroups.stream() @@ -3129,7 +2895,7 @@ public void onManagementNodeLeft(List nodeList, if (vo.getMsid() == _serverId) { logger.info("Cleaning up storage maintenance jobs associated with Management server: {}", vo); List poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); - if (poolIds.size() > 0) { + if (!poolIds.isEmpty()) { for (Long poolId : poolIds) { StoragePoolVO pool = _storagePoolDao.findById(poolId); // check if pool is in an inconsistent state @@ -3326,17 +3092,6 @@ public boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool) { return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostPrepareStoragePoolAccess(host, pool); } - @Override - public boolean canDisconnectHostFromStoragePool(Host host, StoragePool pool) { - if (pool == null || !pool.isManaged()) { - return true; - } - - DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); - DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canDisconnectHostFromStoragePool(host, pool); - } - @Override @DB public Host getHost(long hostId) { @@ -3344,7 +3099,7 @@
public Host getHost(long hostId) { } @Override - public Host updateSecondaryStorage(long secStorageId, String newUrl) { + public void updateSecondaryStorage(long secStorageId, String newUrl) { HostVO secHost = _hostDao.findById(secStorageId); if (secHost == null) { throw new InvalidParameterValueException("Can not find out the secondary storage id: " + secStorageId); @@ -3384,14 +3139,13 @@ public Host updateSecondaryStorage(long secStorageId, String newUrl) { throw new InvalidParameterValueException("can not change old scheme:" + oldUri.getScheme() + " to " + uri.getScheme()); } } catch (URISyntaxException e) { - logger.debug("Failed to get uri from " + oldUrl); + logger.debug("Failed to get uri from {}", oldUrl); } secHost.setStorageUrl(newUrl); secHost.setGuid(newUrl); secHost.setName(newUrl); _hostDao.update(secHost.getId(), secHost); - return secHost; } @Override @@ -3477,14 +3231,15 @@ protected boolean storagePoolHasEnoughIops(long requestedIops, List> requestedVolumes, StoragePool pool) { if (requestedVolumes == null || requestedVolumes.isEmpty() || pool == null) { - logger.debug(String.format("Cannot check if storage [%s] has enough IOPS to allocate volumes [%s].", pool, requestedVolumes)); + logger.debug("Cannot check if storage [{}] has enough IOPS to allocate volumes [{}].", + pool, requestedVolumes); return false; } if (checkIfPoolIopsCapacityNull(pool)) { @@ -3535,12 +3290,12 @@ public boolean storagePoolHasEnoughSpace(List> volumeD @Override public boolean storagePoolHasEnoughSpace(List> volumeDiskProfilesList, StoragePool pool, Long clusterId) { if (CollectionUtils.isEmpty(volumeDiskProfilesList)) { - logger.debug(String.format("Cannot check if pool [%s] has enough space to allocate volumes because the volumes list is empty.", pool)); + logger.debug("Cannot check if pool [{}] has enough space to allocate volumes because the volumes list is empty.", pool); return false; } if (!checkUsagedSpace(pool)) { - logger.debug(String.format("Cannot allocate pool [%s] because there is not enough space in this pool.", pool)); + logger.debug("Cannot allocate pool [{}] because there is not enough space in this pool.", pool); return false; } @@ -3569,7 +3324,7 @@ public boolean storagePoolHasEnoughSpace(List> volumeD volumeVO = volumeDao.findById(volume.getId()); } - // this if statement should resolve to true at most once per execution of the for loop its contained within (for a root disk that is + // this if statement should resolve to true at most once per execution of the for loop it's contained within (for a root disk that is // to leverage a template) if (volume.getTemplateId() != null) { VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volume.getTemplateId()); @@ -3631,10 +3386,10 @@ protected Answer getCheckDatastorePolicyComplianceAnswer(String storagePolicyId, long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostIds.get(0), cmd); return _agentMgr.send(targetHostId, cmd); } catch (AgentUnavailableException e) { - logger.debug("Unable to send storage pool command to " + pool + " via " + hostIds.get(0), e); + logger.debug("Unable to send storage pool command to {} via {}", pool, hostIds.get(0), e); throw new StorageUnavailableException("Unable to send command to the pool ", pool.getId()); } catch (OperationTimedoutException e) { - logger.debug("Failed to process storage pool command to " + pool + " via " + hostIds.get(0), e); + logger.debug("Failed to process storage pool command to {} via {}", pool, hostIds.get(0), e); throw new 
StorageUnavailableException("Failed to process storage command to the pool ", pool.getId()); } } @@ -3718,14 +3473,14 @@ protected boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTemp return false; } if (!AllowVolumeReSizeBeyondAllocation.valueIn(pool.getId())) { - logger.debug(String.format("Skipping the pool %s as %s is false", pool, AllowVolumeReSizeBeyondAllocation.key())); + logger.debug("Skipping the pool {} as {} is false", pool, AllowVolumeReSizeBeyondAllocation.key()); return false; } double storageAllocatedThresholdForResize = CapacityManager.StorageAllocatedCapacityDisableThresholdForVolumeSize.valueIn(pool.getId()); if (usedPercentage > storageAllocatedThresholdForResize) { - logger.debug(String.format("Skipping the pool %s since its allocated percentage: %s has crossed the allocated %s: %s", - pool, usedPercentage, CapacityManager.StorageAllocatedCapacityDisableThresholdForVolumeSize.key(), storageAllocatedThresholdForResize)); + logger.debug("Skipping the pool {} since its allocated percentage: {} has crossed the allocated {}: {}", + pool, usedPercentage, CapacityManager.StorageAllocatedCapacityDisableThresholdForVolumeSize.key(), storageAllocatedThresholdForResize); return false; } } @@ -3747,7 +3502,6 @@ protected boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTemp /** * Storage plug-ins for managed storage can be designed in such a way as to store a template on the primary storage once and * make use of it via storage-side cloning. - * * This method determines how many more bytes it will need for the template (if the template is already stored on the primary storage, * then the answer is 0). */ @@ -3829,34 +3583,37 @@ private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { @Override public boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volume) { if (pool == null || volume == null) { - logger.debug(String.format("Cannot check if storage pool [%s] is compatible with volume [%s].", pool, volume)); + logger.debug("Cannot check if storage pool [{}] is compatible with volume [{}].", pool, volume); return false; } if (volume.getPoolId() == null) { - logger.debug(String.format("Volume [%s] is not allocated to any pool. Cannot check compatibility with pool [%s].", volume, pool)); + logger.debug("Volume [{}] is not allocated to any pool. Cannot check compatibility with pool [{}].", + volume, pool); return true; } StoragePool volumePool = _storagePoolDao.findById(volume.getPoolId()); if (volumePool == null) { - logger.debug(String.format("Pool [%s] used by volume [%s] does not exist. Cannot check compatibility.", pool, volume)); + logger.debug("Pool [{}] used by volume [{}] does not exist. 
Cannot check compatibility.", pool, volume); return true; } if (volume.getState() == Volume.State.Ready) { if (volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex && pool.getPoolType() != Storage.StoragePoolType.PowerFlex) { - logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); + logger.debug("Pool [{}] with type [{}] does not match volume [{}] pool type [{}].", + pool, pool.getPoolType(), volume, volumePool.getPoolType()); return false; } else if (volumePool.getPoolType() != Storage.StoragePoolType.PowerFlex && pool.getPoolType() == Storage.StoragePoolType.PowerFlex) { - logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); + logger.debug("Pool [{}] with type [{}] does not match volume [{}] pool type [{}].", + pool, pool.getPoolType(), volume, volumePool.getPoolType()); return false; } } else { - logger.debug(String.format("Cannot check compatibility of pool [%s] because volume [%s] is not in [%s] state.", pool, volume, Volume.State.Ready)); + logger.debug("Cannot check compatibility of pool [{}] because volume [{}] is not in [{}] state.", pool, volume, Volume.State.Ready); return false; } - logger.debug(String.format("Pool [%s] is compatible with volume [%s].", pool, volume)); + logger.debug("Pool [{}] is compatible with volume [{}].", pool, volume); return true; } @@ -3903,7 +3660,7 @@ private String getValidTemplateName(Long zoneId, HypervisorType hType) { return templateName; } @Override - public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { + public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, InvalidParameterValueException { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); if (storeProvider == null) { @@ -3974,7 +3731,7 @@ public ImageStore discoverImageStore(String name, String url, String providerNam store = lifeCycle.initialize(params); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug("Failed to add data store: " + e.getMessage(), e); + logger.debug("Failed to add data store: {}", e.getMessage(), e); } throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -4043,14 +3800,10 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { if (CollectionUtils.isEmpty(stores)) { List> hypervisorTypes = _clusterDao.listDistinctHypervisorsAndArchExcludingExternalType(zoneId); - TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); - SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); String filePath = null; - try { + try (TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister")) { + SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); filePath = Files.createTempDirectory(SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE).toString(); - if (filePath == null) { - throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); - } Pair storeUrlAndId = new Pair<>(url, store.getId()); String nfsVersion = imageStoreDetailsUtil.getNfsVersion(store.getId()); for (Pair 
hypervisorArchType : hypervisorTypes) { @@ -4068,7 +3821,6 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { logger.error("Failed to register systemVM template(s) due to: ", e); } finally { SystemVmTemplateRegistration.unmountStore(filePath); - txn.close(); } } } @@ -4080,7 +3832,7 @@ public ImageStore migrateToObjectStore(String name, String url, String providerN // check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages List imgStores = _imageStoreDao.listImageStores(); List nfsStores = new ArrayList<>(); - if (imgStores != null && imgStores.size() > 0) { + if (CollectionUtils.isNotEmpty(imgStores)) { for (ImageStoreVO store : imgStores) { if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { throw new InvalidParameterValueException("We only support migrate NFS secondary storage to use object store!"); @@ -4090,7 +3842,7 @@ public ImageStore migrateToObjectStore(String name, String url, String providerN } } // convert all NFS secondary storage to staging store - if (nfsStores != null && nfsStores.size() > 0) { + if (CollectionUtils.isNotEmpty(nfsStores)) { for (ImageStoreVO store : nfsStores) { long storeId = store.getId(); @@ -4188,11 +3940,17 @@ public void updateStorageCapabilities(Long poolId, boolean failOnChecks) { List poolIds = new ArrayList<>(); poolIds.add(pool.getId()); List hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds); - if (hosts.size() > 0) { + if (!hosts.isEmpty()) { GetStoragePoolCapabilitiesCommand cmd = new GetStoragePoolCapabilitiesCommand(); cmd.setPool(new StorageFilerTO(pool)); GetStoragePoolCapabilitiesAnswer answer = (GetStoragePoolCapabilitiesAnswer) _agentMgr.easySend(hosts.get(0), cmd); - if (answer.getPoolDetails() != null && answer.getPoolDetails().containsKey(Storage.Capability.HARDWARE_ACCELERATION.toString())) { + if (answer == null) { + String msg = "Unable to get storage capabilities from storage pool: " + pool.getName(); + logger.error(msg); + if (failOnChecks) { + throw new CloudRuntimeException(msg); + } + } else if (answer.getPoolDetails() != null && answer.getPoolDetails().containsKey(Storage.Capability.HARDWARE_ACCELERATION.toString())) { StoragePoolDetailVO hardwareAccelerationSupported = _storagePoolDetailsDao.findDetail(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString()); if (hardwareAccelerationSupported == null) { StoragePoolDetailVO storagePoolDetailVO = new StoragePoolDetailVO(pool.getId(), Storage.Capability.HARDWARE_ACCELERATION.toString(), answer.getPoolDetails().get(Storage.Capability.HARDWARE_ACCELERATION.toString()), false); @@ -4201,12 +3959,10 @@ public void updateStorageCapabilities(Long poolId, boolean failOnChecks) { hardwareAccelerationSupported.setValue(answer.getPoolDetails().get(Storage.Capability.HARDWARE_ACCELERATION.toString())); _storagePoolDetailsDao.update(hardwareAccelerationSupported.getId(), hardwareAccelerationSupported); } - } else { - if (answer != null && !answer.getResult()) { - logger.error("Failed to update storage pool capabilities: " + answer.getDetails()); - if (failOnChecks) { - throw new CloudRuntimeException(answer.getDetails()); - } + } else if (!answer.getResult()) { + logger.error("Failed to update storage pool capabilities: {}", answer.getDetails()); + if (failOnChecks) { + throw new CloudRuntimeException(answer.getDetails()); } } } @@ -4260,17 +4016,17 @@ public boolean deleteImageStore(DeleteImageStoreCmd cmd) { // Verify that there are no live snapshot, template, volume on the image 
// store to be deleted List snapshots = _snapshotStoreDao.listByStoreId(storeId, DataStoreRole.Image); - if (snapshots != null && snapshots.size() > 0) { + if (CollectionUtils.isNotEmpty(snapshots)) { throw new InvalidParameterValueException("Cannot delete image store with active snapshots backup!"); } List volumes = _volumeStoreDao.listByStoreId(storeId); - if (volumes != null && volumes.size() > 0) { + if (CollectionUtils.isNotEmpty(volumes)) { throw new InvalidParameterValueException("Cannot delete image store with active volumes backup!"); } // search if there are user templates stored on this image store, excluding system, builtin templates List templates = _templateViewDao.listActiveTemplates(storeId); - if (templates != null && templates.size() > 0) { + if (CollectionUtils.isNotEmpty(templates)) { throw new InvalidParameterValueException("Cannot delete image store with active Templates backup!"); } @@ -4349,11 +4105,11 @@ public ImageStore createSecondaryStagingStore(CreateSecondaryStagingStoreCmd cmd params.put("role", DataStoreRole.ImageCache); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); - DataStore store = null; + DataStore store; try { store = lifeCycle.initialize(params); } catch (Exception e) { - logger.debug("Failed to add data store: " + e.getMessage(), e); + logger.debug("Failed to add data store: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -4373,16 +4129,16 @@ public boolean deleteSecondaryStagingStore(DeleteSecondaryStagingStoreCmd cmd) { // Verify that there are no live snapshot, template, volume on the cache // store that is currently referenced List snapshots = _snapshotStoreDao.listActiveOnCache(storeId); - if (snapshots != null && snapshots.size() > 0) { + if (CollectionUtils.isNotEmpty(snapshots)) { throw new InvalidParameterValueException("Cannot delete cache store with staging snapshots currently in use!"); } List volumes = _volumeStoreDao.listActiveOnCache(storeId); - if (volumes != null && volumes.size() > 0) { + if (CollectionUtils.isNotEmpty(volumes)) { throw new InvalidParameterValueException("Cannot delete cache store with staging Volumes currently in use!"); } List templates = _templateStoreDao.listActiveOnCache(storeId); - if (templates != null && templates.size() > 0) { + if (CollectionUtils.isNotEmpty(templates)) { throw new InvalidParameterValueException("Cannot delete cache store with staging Templates currently in use!"); } @@ -4519,7 +4275,7 @@ public Long getDiskBytesReadRate(final ServiceOffering offering, final DiskOffer } else if ((diskOffering != null) && (diskOffering.getBytesReadRate() != null) && (diskOffering.getBytesReadRate() > 0)) { return diskOffering.getBytesReadRate(); } else { - Long bytesReadRate = Long.parseLong(_configDao.getValue(Config.VmDiskThrottlingBytesReadRate.key())); + long bytesReadRate = VmDiskThrottlingBytesReadRate.value(); if ((bytesReadRate > 0) && ((offering == null) || (!offering.isSystemUse()))) { return bytesReadRate; } @@ -4533,7 +4289,7 @@ public Long getDiskBytesWriteRate(final ServiceOffering offering, final DiskOffe if ((diskOffering != null) && (diskOffering.getBytesWriteRate() != null) && (diskOffering.getBytesWriteRate() > 0)) { return diskOffering.getBytesWriteRate(); } else { - Long bytesWriteRate = Long.parseLong(_configDao.getValue(Config.VmDiskThrottlingBytesWriteRate.key())); + long bytesWriteRate = VmDiskThrottlingBytesWriteRate.value(); if ((bytesWriteRate > 0) && ((offering == null) || 
(!offering.isSystemUse()))) { return bytesWriteRate; } @@ -4547,7 +4303,7 @@ public Long getDiskIopsReadRate(final ServiceOffering offering, final DiskOfferi if ((diskOffering != null) && (diskOffering.getIopsReadRate() != null) && (diskOffering.getIopsReadRate() > 0)) { return diskOffering.getIopsReadRate(); } else { - Long iopsReadRate = Long.parseLong(_configDao.getValue(Config.VmDiskThrottlingIopsReadRate.key())); + long iopsReadRate = VmDiskThrottlingIopsReadRate.value(); if ((iopsReadRate > 0) && ((offering == null) || (!offering.isSystemUse()))) { return iopsReadRate; } @@ -4561,7 +4317,7 @@ public Long getDiskIopsWriteRate(final ServiceOffering offering, final DiskOffer if ((diskOffering != null) && (diskOffering.getIopsWriteRate() != null) && (diskOffering.getIopsWriteRate() > 0)) { return diskOffering.getIopsWriteRate(); } else { - Long iopsWriteRate = Long.parseLong(_configDao.getValue(Config.VmDiskThrottlingIopsWriteRate.key())); + long iopsWriteRate = VmDiskThrottlingIopsWriteRate.value(); if ((iopsWriteRate > 0) && ((offering == null) || (!offering.isSystemUse()))) { return iopsWriteRate; } @@ -4603,7 +4359,11 @@ public ConfigKey[] getConfigKeys() { AllowVolumeReSizeBeyondAllocation, StoragePoolHostConnectWorkers, ObjectStorageCapacityThreshold, - COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES + COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES, + VmDiskThrottlingBytesReadRate, + VmDiskThrottlingBytesWriteRate, + VmDiskThrottlingIopsReadRate, + VmDiskThrottlingIopsWriteRate }; } @@ -4617,8 +4377,8 @@ public void setDiskProfileThrottling(DiskProfile dskCh, final ServiceOffering of @Override public DiskTO getDiskWithThrottling(final DataTO volTO, final Volume.Type volumeType, final long deviceId, final String path, final long offeringId, final long diskOfferingId) { - DiskTO disk = null; - if (volTO != null && volTO instanceof VolumeObjectTO) { + DiskTO disk; + if (volTO instanceof VolumeObjectTO) { VolumeObjectTO volumeTO = (VolumeObjectTO)volTO; ServiceOffering offering = _entityMgr.findById(ServiceOffering.class, offeringId); DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, diskOfferingId); @@ -4637,10 +4397,7 @@ public DiskTO getDiskWithThrottling(final DataTO volTO, final Volume.Type volume @Override public boolean isStoragePoolDatastoreClusterParent(StoragePool pool) { List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); - if (childStoragePools != null && !childStoragePools.isEmpty()) { - return true; - } - return false; + return CollectionUtils.isNotEmpty(childStoragePools); } private void setVolumeObjectTOThrottling(VolumeObjectTO volumeTO, final ServiceOffering offering, final DiskOffering diskOffering) { @@ -4652,7 +4409,7 @@ private void setVolumeObjectTOThrottling(VolumeObjectTO volumeTO, final ServiceO @Override @ActionEvent(eventType = EventTypes.EVENT_OBJECT_STORE_CREATE, eventDescription = "creating object storage") - public ObjectStore discoverObjectStore(String name, String url, Long size, String providerName, Map details) + public ObjectStore discoverObjectStore(String name, String url, Long size, String providerName, Map details) throws IllegalArgumentException, InvalidParameterValueException { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); @@ -4682,11 +4439,7 @@ public ObjectStore discoverObjectStore(String name, String url, Long size, Strin Map params = new HashMap<>(); params.put("url", url); params.put("name", name); - if (size == null) { - params.put("size", 
0L); - } else { - params.put("size", size); - } + params.put("size", size == null ? 0L : size); params.put("providerName", storeProvider.getName()); params.put("role", DataStoreRole.Object); params.put("details", details); @@ -4698,9 +4451,9 @@ public ObjectStore discoverObjectStore(String name, String url, Long size, Strin store = lifeCycle.initialize(params); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug("Failed to add object store: " + e.getMessage(), e); + logger.debug("Failed to add object store: {}", e.getMessage(), e); } - throw new CloudRuntimeException("Failed to add object store: " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to add object store: " + e.getMessage(), e); } return (ObjectStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Object); @@ -4718,7 +4471,7 @@ public boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd) { // Verify that there are no buckets in the store List buckets = _bucketDao.listByObjectStoreId(storeId); - if(buckets != null && buckets.size() > 0) { + if (CollectionUtils.isNotEmpty(buckets)) { throw new InvalidParameterValueException("Cannot delete object store with buckets"); } @@ -4791,7 +4544,6 @@ public CapacityVO getObjectStorageUsedStats(Long zoneId) { total += objectStore.getTotalSize(); } } - CapacityVO capacity = new CapacityVO(null, zoneId, null, null, allocated, total, Capacity.CAPACITY_TYPE_OBJECT_STORAGE); - return capacity; + return new CapacityVO(null, zoneId, null, null, allocated, total, Capacity.CAPACITY_TYPE_OBJECT_STORAGE); } }
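[Note, not part of the diff] The VM disk throttling settings move from Config.java rows read through _configDao to ConfigKey constants. A minimal sketch of that pattern, assuming one of the common ConfigKey constructor overloads; the key name and description mirror the old Config entry but are assumptions here, not text copied from the patch:

    import org.apache.cloudstack.framework.config.ConfigKey;

    public interface VmDiskThrottlingConfigSketch {
        // Replaces Long.parseLong(_configDao.getValue(Config.VmDiskThrottlingBytesReadRate.key())).
        ConfigKey<Long> VmDiskThrottlingBytesReadRate = new ConfigKey<>("Advanced", Long.class,
                "vm.disk.throttling.bytes_read_rate", "0",
                "Default disk I/O read rate in bytes per second allowed in a user VM's disk.", true);

        // Typed, cached access with no manual parsing at the call site.
        static long bytesReadRate() {
            return VmDiskThrottlingBytesReadRate.value();
        }
    }

Such keys only take effect once they are published through the component's getConfigKeys() method, which is why the diff appends the four VmDiskThrottling* keys to the array returned by StorageManagerImpl.getConfigKeys().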